Dataset schema (each record pairs a `code` sample with a `style_context` sample):

| Column                  | Type   | Range            |
|-------------------------|--------|------------------|
| code                    | string | 86 – 54.5k chars |
| code_codestyle          | int64  | 0 – 371          |
| style_context           | string | 87 – 49.2k chars |
| style_context_codestyle | int64  | 0 – 349          |
| label                   | int64  | 0 – 1            |

The records below are listed field by field.
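For orientation, a minimal sketch of iterating such a dataset with the `datasets` library; `"path/to/dataset"` is a placeholder, not the dataset's real identifier.

```python
# A minimal sketch, assuming the records are stored as a Hugging Face dataset;
# "path/to/dataset" is a placeholder, not a real dataset id.
from datasets import load_dataset

ds = load_dataset("path/to/dataset", split="train")
for row in ds.select(range(2)):
    print(len(row["code"]), row["code_codestyle"], row["label"])
```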
--- Record 1 ---

code:

```python
class EditDistance:
    """Edit-distance solver with top-down and bottom-up dynamic-programming implementations."""

    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)

            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]

        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]

        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
```
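A quick sanity check for the class above, using the classic kitten → sitting distance of 3:

```python
# A minimal sketch exercising both implementations on a classic example.
solver = EditDistance()
assert solver.min_dist_top_down("kitten", "sitting") == 3  # substitute k->s, e->i, insert g
assert solver.min_dist_bottom_up("kitten", "sitting") == 3
```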
code_codestyle: 5
style_context:

```python
"""
Project Euler Problem 187: https://projecteuler.net/problem=187

Count the composite integers below 10**8 that have precisely two,
not necessarily distinct, prime factors (semiprimes).
"""
from math import isqrt


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Returns the prime numbers below max_number (sieve of Eratosthenes)."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False

    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Returns the number of semiprimes below max_number, counted with two pointers."""
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count


if __name__ == "__main__":
    print(f"{solution() = }")
```
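A small worked check of the two-pointer count: below 30 there are exactly ten semiprimes.

```python
# A minimal sketch verifying the counting on a small bound.
assert solution(30) == 10  # 4, 6, 9, 10, 14, 15, 21, 22, 25, 26
```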
style_context_codestyle: 5
label: 1
--- Record 2 ---

code:

```python
import argparse
import json
import os
import re
import shutil

import torch

from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging


logging.set_verbosity_warning()

json_indent = 2


class Dictionary:
    """A fairseq-style mapping from symbols to consecutive integers."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Loads the dictionary from a text file with '<symbol> <count>' lines."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        """Loads a pre-existing dictionary from a text file and adds its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")


def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add a word-ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2


def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models

    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with

    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--biogpt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
```
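The conversion entry point can also be called directly from Python; a minimal sketch with placeholder paths (the checkpoint directory must contain `checkpoint.pt`, `dict.txt`, and `bpecodes`):

```python
# A minimal sketch; both paths are placeholders for real locations on disk.
convert_biogpt_checkpoint_to_pytorch(
    biogpt_checkpoint_path="path/to/biogpt_checkpoint_dir",
    pytorch_dump_folder_path="path/to/output_dir",
)
```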
code_codestyle: 364
style_context:

```python
import collections
from typing import List, Optional, Union

from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}

CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}


class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer


DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
              batch of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
            is required by one of the truncation/padding parameters. If the model has no specific maximum input
            length (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Return:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """


@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: "DPRReaderOutput",
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> "List[DPRSpanPrediction]":
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> "List[DPRSpanPrediction]":
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
```
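A short usage sketch for the reader tokenizer above, mirroring the documented DPR workflow; it assumes network access to the public `facebook/dpr-reader-single-nq-base` checkpoint:

```python
# A minimal sketch, assuming the public facebook/dpr-reader-single-nq-base checkpoint.
from transformers import DPRReader, DPRReaderTokenizerFast

tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")

encoded_inputs = tokenizer(
    questions=["What is love?"],
    titles=["Haddaway"],
    texts=["'What Is Love' is a song recorded by the artist Haddaway"],
    return_tensors="pt",
)
outputs = model(**encoded_inputs)
# decode_best_spans combines start/end/relevance logits into ranked span predictions
predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
print(predicted_spans[0].text)
```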
style_context_codestyle: 221
label: 0
--- Record 3 ---

code:

```python
"""Longformer configuration"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging


if TYPE_CHECKING:
    from ...onnx.config import PatchingSpec
    from ...tokenization_utils_base import PreTrainedTokenizerBase


logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
    "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
    "allenai/longformer-large-4096-finetuned-triviaqa": (
        "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
    ),
    "allenai/longformer-base-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
    "allenai/longformer-large-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
}


class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        """Constructs LongformerConfig."""
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export


class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        preprocessor: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
```
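`attention_window` accepts either a single size or one size per layer; a minimal sketch:

```python
# A minimal sketch: a per-layer attention window (one entry per hidden layer).
from transformers import LongformerConfig

config = LongformerConfig(attention_window=[256] * 12, num_hidden_layers=12)
print(config.attention_window)
```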
code_codestyle: 234
style_context:

```python
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
```
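A minimal sketch instantiating a randomly initialized backbone from the config (`ViTMSNModel` is the matching model class in Transformers):

```python
# A minimal sketch: config -> randomly initialized model.
from transformers import ViTMSNConfig, ViTMSNModel

config = ViTMSNConfig(image_size=224, patch_size=16)
model = ViTMSNModel(config)
print(model.config.hidden_size)
```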
style_context_codestyle: 142
label: 0
--- Record 4 ---

code:

```python
"""
Processor class for InstructBLIP. Largely a copy of Blip2Processor with the addition of a tokenizer for the Q-Former.
"""

import os
from typing import List, Optional, Union

from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer


class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    # overwrite to save the Q-Former tokenizer in a separate folder
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    # overwrite to load the Q-Former tokenizer from a separate folder
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
```
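A save/load round-trip sketch showing the separate `qformer_tokenizer` subfolder the overrides above create; it assumes the public `Salesforce/instructblip-flan-t5-xl` checkpoint:

```python
# A minimal sketch, assuming the public Salesforce/instructblip-flan-t5-xl checkpoint.
from transformers import InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
processor.save_pretrained("saved_processor")  # writes qformer_tokenizer/ as a subfolder
reloaded = InstructBlipProcessor.from_pretrained("saved_processor")
```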
code_codestyle: 358
style_context:

```python
"""Compute ROUGE between a file of predictions and a file of reference summaries."""
import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
```
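Because the entry point is exposed through python-fire, CLI arguments map one-to-one onto the function signature; the file names below are placeholders:

```python
# A minimal sketch of the equivalent direct call; file paths are placeholders.
# Shell form: python rouge_cli.py predictions.txt reference.txt --save_path rouge_metrics.json
metrics = calculate_rouge_path("predictions.txt", "reference.txt", save_path="rouge_metrics.json")
print(metrics)
```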
style_context_codestyle: 174
label: 0
--- Record 5 ---

code:

```python
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError

import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    PROCESSOR_MAPPING,
    TOKENIZER_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    AutoProcessor,
    AutoTokenizer,
    BertTokenizer,
    ProcessorMixin,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402
from test_module.custom_processing import CustomProcessor  # noqa E402
from test_module.custom_tokenization import CustomTokenizer  # noqa E402


SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures")


class AutoFeatureExtractorTest(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_processor_from_model_shortcut(self):
        processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_repo(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            processor.save_pretrained(tmpdirname)

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_extractor_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(SAMPLE_PROCESSOR_CONFIG, os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME))
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_feat_extr_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in tokenizer
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_tokenizer_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in feature extractor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_model_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config(processor_class="Wav2Vec2Processor")
            model_config.save_pretrained(tmpdirname)
            # copy relevant files
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))
            # create emtpy sample processor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write("{}")

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_from_pretrained_dynamic_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )

        processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor", trust_remote_code=True)
        self.assertTrue(processor.special_attribute_present)
        self.assertEqual(processor.__class__.__name__, "NewProcessor")

        feature_extractor = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present)
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        tokenizer = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            new_processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True, use_fast=False
            )
            new_tokenizer = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present)
            self.assertEqual(new_tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")

    def test_new_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            AutoProcessor.register(CustomConfig, CustomProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoProcessor.register(Wav2Vec2Config, Wav2Vec2Processor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                vocab_file = os.path.join(tmp_dir, "vocab.txt")
                with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                    vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
                tokenizer = CustomTokenizer(vocab_file)

            processor = CustomProcessor(feature_extractor, tokenizer)

            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(tmp_dir)
                new_processor = AutoProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_processor, CustomProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_processor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            special_attribute_present = False

        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewProcessor(ProcessorMixin):
            feature_extractor_class = "AutoFeatureExtractor"
            tokenizer_class = "AutoTokenizer"
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoProcessor.register(CustomConfig, NewProcessor)
            # If remote code is not set, the default is to use local classes.
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local ones.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertTrue(processor.special_attribute_present)
            self.assertTrue(processor.feature_extractor.special_attribute_present)
            self.assertTrue(processor.tokenizer.special_attribute_present)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_auto_processor_creates_tokenizer(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(processor.__class__.__name__, "BertTokenizerFast")

    def test_auto_processor_creates_image_processor(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext")
        self.assertEqual(processor.__class__.__name__, "ConvNextImageProcessor")


@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor"), push_to_hub=True, use_auth_token=self._token
            )

            new_processor = Wav2Vec2Processor.from_pretrained(f"{USER}/test-processor")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_in_organization(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor-org"),
                push_to_hub=True,
                use_auth_token=self._token,
                organization="valid_org",
            )

            new_processor = Wav2Vec2Processor.from_pretrained("valid_org/test-processor-org")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_dynamic_processor(self):
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()

        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        processor = CustomProcessor(feature_extractor, tokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f"{USER}/test-dynamic-processor", token=self._token)
            repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-processor", token=self._token)
            processor.save_pretrained(tmp_dir)

            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map,
                {
                    "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(tmp_dir, "tokenizer_config.json")) as f:
                tokenizer_config = json.load(f)
            self.assertDictEqual(
                tokenizer_config["auto_map"],
                {
                    "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_feature_extraction.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_tokenization.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_processing.py")))

            repo.push_to_hub()

        new_processor = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor", trust_remote_code=True)
        # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
        self.assertEqual(new_processor.__class__.__name__, "CustomProcessor")
```
code_codestyle: 169
style_context:

```python
import inspect
from typing import Callable, List, Optional, Union

import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        # import pdb
        # pdb.set_trace()
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
```
169
1
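The pipeline in the row above is the "seed resize" Stable Diffusion variant: it draws the initial noise at a reference latent resolution and center-crops or pads it into the target latent shape, so one seed yields related compositions across image sizes. A minimal, hypothetical usage sketch; the custom_pipeline id and checkpoint name are assumptions, not part of this row:

import torch
from diffusers import DiffusionPipeline

# Assumption: this file ships as the "seed_resize_stable_diffusion" community pipeline.
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="seed_resize_stable_diffusion",
    torch_dtype=torch.float16,
).to("cuda")
generator = torch.Generator(device="cuda").manual_seed(0)
# The same seed at 512x512 and 512x768 should then produce related compositions.
image = pipe(
    "a photo of an astronaut riding a horse", height=512, width=768, generator=generator
).images[0]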
from datetime import datetime

import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
219
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
    "distilbert-base-uncased-distilled-squad": (
        "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
    "distilbert-base-cased-distilled-squad": (
        "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
    "distilbert-base-multilingual-cased": (
        "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
    ),
    "distilbert-base-uncased-finetuned-sst-2-english": (
        "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
    ),
}


class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
219
1
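For reference, the config above mirrors the public transformers API. A small sketch showing how the attribute_map aliases resolve; nothing is assumed beyond an installed transformers:

from transformers import DistilBertConfig, DistilBertModel

config = DistilBertConfig(n_layers=6, n_heads=12, dim=768, hidden_dim=4 * 768)
model = DistilBertModel(config)
# `hidden_size` is aliased to `dim` through attribute_map, so both read the same value.
print(config.hidden_size, config.dim)  # 768 768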
'''simple docstring''' import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): __UpperCAmelCase ="pt" elif is_tf_available(): __UpperCAmelCase ="tf" else: __UpperCAmelCase ="jax" class a__ ( UpperCAmelCase__ , unittest.TestCase ): lowerCamelCase : Union[str, Any] =PerceiverTokenizer lowerCamelCase : Tuple =False def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" super().setUp() __lowerCamelCase = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' ) def SCREAMING_SNAKE_CASE__ ( self : Dict , **a : str ): """simple docstring""" return self.tokenizer_class.from_pretrained(self.tmpdirname , **a ) def SCREAMING_SNAKE_CASE__ ( self : Any , a : str , a : Any=False , a : Any=20 , a : Union[str, Any]=5 ): """simple docstring""" __lowerCamelCase = [] for i in range(len(a ) ): try: __lowerCamelCase = tokenizer.decode([i] , clean_up_tokenization_spaces=a ) except UnicodeDecodeError: pass toks.append((i, tok) ) __lowerCamelCase = list(filter(lambda a : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , a ) ) __lowerCamelCase = list(filter(lambda a : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=a ) , a ) ) if max_length is not None and len(a ) > max_length: __lowerCamelCase = toks[:max_length] if min_length is not None and len(a ) < min_length and len(a ) > 0: while len(a ) < min_length: __lowerCamelCase = toks + toks # toks_str = [t[1] for t in toks] __lowerCamelCase = [t[0] for t in toks] # Ensure consistency __lowerCamelCase = tokenizer.decode(a , clean_up_tokenization_spaces=a ) if " " not in output_txt and len(a ) > 1: __lowerCamelCase = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=a ) + ''' ''' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=a ) ) if with_prefix_space: __lowerCamelCase = ''' ''' + output_txt __lowerCamelCase = tokenizer.encode(a , add_special_tokens=a ) return output_txt, output_ids def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" __lowerCamelCase = self.perceiver_tokenizer __lowerCamelCase = '''Unicode €.''' __lowerCamelCase = tokenizer(a ) __lowerCamelCase = [4, 91, 1_16, 1_11, 1_05, 1_17, 1_06, 1_07, 38, 2_32, 1_36, 1_78, 52, 5] self.assertEqual(encoded['''input_ids'''] , a ) # decoding __lowerCamelCase = tokenizer.decode(a ) self.assertEqual(a , '''[CLS]Unicode €.[SEP]''' ) __lowerCamelCase = tokenizer('''e è é ê ë''' ) __lowerCamelCase = [4, 1_07, 38, 2_01, 1_74, 38, 2_01, 1_75, 38, 2_01, 1_76, 38, 2_01, 1_77, 5] self.assertEqual(encoded['''input_ids'''] , a ) # decoding __lowerCamelCase = tokenizer.decode(a ) self.assertEqual(a , '''[CLS]e è é ê ë[SEP]''' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' ) def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" __lowerCamelCase = self.perceiver_tokenizer __lowerCamelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] # fmt: off __lowerCamelCase = [4, 71, 38, 1_14, 1_17, 1_16, 1_09, 38, 1_18, 1_03, 1_20, 1_03, 1_09, 1_20, 1_03, 1_18, 1_10, 
38, 1_08, 1_17, 1_20, 38, 1_21, 1_23, 1_15, 1_15, 1_03, 1_20, 1_11, 1_28, 1_03, 1_22, 1_11, 1_17, 1_16, 52, 5, 0] # fmt: on __lowerCamelCase = tokenizer(a , padding=a , return_tensors=a ) self.assertIsInstance(a , a ) if FRAMEWORK != "jax": __lowerCamelCase = list(batch.input_ids.numpy()[0] ) else: __lowerCamelCase = list(batch.input_ids.tolist()[0] ) self.assertListEqual(a , a ) self.assertEqual((2, 38) , batch.input_ids.shape ) self.assertEqual((2, 38) , batch.attention_mask.shape ) def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" __lowerCamelCase = self.perceiver_tokenizer __lowerCamelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] __lowerCamelCase = tokenizer(a , padding=a , return_tensors=a ) # check if input_ids are returned and no decoder_input_ids self.assertIn('''input_ids''' , a ) self.assertIn('''attention_mask''' , a ) self.assertNotIn('''decoder_input_ids''' , a ) self.assertNotIn('''decoder_attention_mask''' , a ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" __lowerCamelCase = self.perceiver_tokenizer __lowerCamelCase = [ '''Summary of the text.''', '''Another summary.''', ] __lowerCamelCase = tokenizer( text_target=a , max_length=32 , padding='''max_length''' , truncation=a , return_tensors=a ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" __lowerCamelCase = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test __lowerCamelCase = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc __lowerCamelCase = tempfile.mkdtemp() __lowerCamelCase = ''' He is very happy, UNwant\u00E9d,running''' __lowerCamelCase = tokenizer.encode(a , add_special_tokens=a ) tokenizer.save_pretrained(a ) __lowerCamelCase = tokenizer.__class__.from_pretrained(a ) __lowerCamelCase = after_tokenizer.encode(a , add_special_tokens=a ) self.assertListEqual(a , a ) shutil.rmtree(a ) __lowerCamelCase = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc __lowerCamelCase = tempfile.mkdtemp() __lowerCamelCase = ''' He is very happy, UNwant\u00E9d,running''' tokenizer.add_tokens(['''bim''', '''bambam'''] ) __lowerCamelCase = tokenizer.additional_special_tokens additional_special_tokens.append('''new_additional_special_token''' ) tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} ) __lowerCamelCase = tokenizer.encode(a , add_special_tokens=a ) tokenizer.save_pretrained(a ) __lowerCamelCase = tokenizer.__class__.from_pretrained(a ) __lowerCamelCase = after_tokenizer.encode(a , add_special_tokens=a ) self.assertListEqual(a , a ) self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) __lowerCamelCase = tokenizer.__class__.from_pretrained(a , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(a ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" __lowerCamelCase = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, 
self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(a ) with open(os.path.join(a , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file: __lowerCamelCase = json.load(a ) with open(os.path.join(a , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file: __lowerCamelCase = json.load(a ) __lowerCamelCase = [f"""<extra_id_{i}>""" for i in range(1_25 )] __lowerCamelCase = added_tokens_extra_ids + [ '''an_additional_special_token''' ] __lowerCamelCase = added_tokens_extra_ids + [ '''an_additional_special_token''' ] with open(os.path.join(a , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(a , a ) with open(os.path.join(a , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(a , a ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files __lowerCamelCase = tokenizer_class.from_pretrained( a , ) self.assertIn( '''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained __lowerCamelCase = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=a )] __lowerCamelCase = tokenizer_class.from_pretrained( a , additional_special_tokens=a , ) self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens ) self.assertEqual( ['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" __lowerCamelCase = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([1_78] ) , '''�''' ) def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self : Tuple ): """simple docstring""" __lowerCamelCase = self.get_tokenizers(fast=a , do_lower_case=a ) for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): __lowerCamelCase = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]'''] __lowerCamelCase = tokenizer.convert_tokens_to_string(a ) self.assertIsInstance(a , a )
67
from .data_collator import (
    DataCollatorForLanguageModeling,
    DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
    DataCollatorForSOP,
    DataCollatorForTokenClassification,
    DataCollatorForWholeWordMask,
    DataCollatorWithPadding,
    DefaultDataCollator,
    default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
    DataProcessor,
    InputExample,
    InputFeatures,
    SingleSentenceClassificationProcessor,
    SquadExample,
    SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
    glue_convert_examples_to_features,
    glue_output_modes,
    glue_processors,
    glue_tasks_num_labels,
    squad_convert_examples_to_features,
    xnli_output_modes,
    xnli_processors,
    xnli_tasks_num_labels,
)
303
0
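The test file above exercises the Perceiver tokenizer, which operates on raw UTF-8 bytes (offset by six special tokens) rather than a learned vocabulary. A short sketch of the behaviour the tests assert, using the checkpoint named in the tests:

from transformers import PerceiverTokenizer

tokenizer = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
encoded = tokenizer("Unicode €.")
# Byte-level ids wrapped in [CLS] (4) ... [SEP] (5), as checked in the test above.
print(encoded["input_ids"][0], encoded["input_ids"][-1])  # 4 5
print(tokenizer.decode(encoded["input_ids"]))  # [CLS]Unicode €.[SEP]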
def hamming(n_element: int) -> list:
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
370
'''simple docstring''' import json import os import shutil import warnings from argparse import ArgumentParser, Namespace from pathlib import Path from typing import List from ..utils import logging from . import BaseTransformersCLICommand try: from cookiecutter.main import cookiecutter _A : List[Any] =True except ImportError: _A : int =False _A : Union[str, Any] =logging.get_logger(__name__) # pylint: disable=invalid-name def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Tuple: return AddNewModelCommand(args.testing , args.testing_file , path=args.path ) class _lowercase ( _lowercase ): @staticmethod def lowerCamelCase_ ( UpperCamelCase__: ArgumentParser ): lowerCamelCase__ : List[str] = parser.add_parser("""add-new-model""" ) add_new_model_parser.add_argument("""--testing""" , action="""store_true""" , help="""If in testing mode.""" ) add_new_model_parser.add_argument("""--testing_file""" , type=UpperCamelCase__ , help="""Configuration file on which to run.""" ) add_new_model_parser.add_argument( """--path""" , type=UpperCamelCase__ , help="""Path to cookiecutter. Should only be used for testing purposes.""" ) add_new_model_parser.set_defaults(func=UpperCamelCase__ ) def __init__( self: Optional[int] , UpperCamelCase__: bool , UpperCamelCase__: str , UpperCamelCase__: str=None , *UpperCamelCase__: Optional[int] ): lowerCamelCase__ : List[Any] = testing lowerCamelCase__ : Tuple = testing_file lowerCamelCase__ : int = path def lowerCamelCase_ ( self: int ): warnings.warn( """The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. """ """It is not actively maintained anymore, so might give a result that won't pass all tests and quality """ """checks, you should use `transformers-cli add-new-model-like` instead.""" ) if not _has_cookiecutter: raise ImportError( """Model creation dependencies are required to use the `add_new_model` command. Install them by running """ """the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n""" ) # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory lowerCamelCase__ : List[Any] = [directory for directory in os.listdir() if """cookiecutter-template-""" == directory[:22]] if len(UpperCamelCase__ ) > 0: raise ValueError( """Several directories starting with `cookiecutter-template-` in current working directory. 
""" """Please clean your directory by removing all folders starting with `cookiecutter-template-` or """ """change your working directory.""" ) lowerCamelCase__ : int = ( Path(UpperCamelCase__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent ) lowerCamelCase__ : int = path_to_transformer_root / """templates""" / """adding_a_new_model""" # Execute cookiecutter if not self._testing: cookiecutter(str(UpperCamelCase__ ) ) else: with open(self._testing_file , """r""" ) as configuration_file: lowerCamelCase__ : List[str] = json.load(UpperCamelCase__ ) cookiecutter( str(path_to_cookiecutter if self._path is None else self._path ) , no_input=UpperCamelCase__ , extra_context=UpperCamelCase__ , ) lowerCamelCase__ : Optional[Any] = [directory for directory in os.listdir() if """cookiecutter-template-""" in directory[:22]][0] # Retrieve configuration with open(directory + """/configuration.json""" , """r""" ) as configuration_file: lowerCamelCase__ : int = json.load(UpperCamelCase__ ) lowerCamelCase__ : Tuple = configuration["""lowercase_modelname"""] lowerCamelCase__ : int = configuration["""generate_tensorflow_pytorch_and_flax"""] os.remove(F'''{directory}/configuration.json''' ) lowerCamelCase__ : Union[str, Any] = """PyTorch""" in generate_tensorflow_pytorch_and_flax lowerCamelCase__ : Union[str, Any] = """TensorFlow""" in generate_tensorflow_pytorch_and_flax lowerCamelCase__ : Tuple = """Flax""" in generate_tensorflow_pytorch_and_flax lowerCamelCase__ : List[str] = F'''{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}''' os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) os.makedirs(F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}''' , exist_ok=UpperCamelCase__ ) # Tests require submodules as they have parent imports with open(F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''' , """w""" ): pass shutil.move( F'''{directory}/__init__.py''' , F'''{model_dir}/__init__.py''' , ) shutil.move( F'''{directory}/configuration_{lowercase_model_name}.py''' , F'''{model_dir}/configuration_{lowercase_model_name}.py''' , ) def remove_copy_lines(UpperCamelCase__: Optional[int] ): with open(UpperCamelCase__ , """r""" ) as f: lowerCamelCase__ : Union[str, Any] = f.readlines() with open(UpperCamelCase__ , """w""" ) as f: for line in lines: if "# Copied from transformers." 
not in line: f.write(UpperCamelCase__ ) if output_pytorch: if not self._testing: remove_copy_lines(F'''{directory}/modeling_{lowercase_model_name}.py''' ) shutil.move( F'''{directory}/modeling_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_{lowercase_model_name}.py''' , ) shutil.move( F'''{directory}/test_modeling_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''' , ) else: os.remove(F'''{directory}/modeling_{lowercase_model_name}.py''' ) os.remove(F'''{directory}/test_modeling_{lowercase_model_name}.py''' ) if output_tensorflow: if not self._testing: remove_copy_lines(F'''{directory}/modeling_tf_{lowercase_model_name}.py''' ) shutil.move( F'''{directory}/modeling_tf_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_tf_{lowercase_model_name}.py''' , ) shutil.move( F'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''' , ) else: os.remove(F'''{directory}/modeling_tf_{lowercase_model_name}.py''' ) os.remove(F'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' ) if output_flax: if not self._testing: remove_copy_lines(F'''{directory}/modeling_flax_{lowercase_model_name}.py''' ) shutil.move( F'''{directory}/modeling_flax_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_flax_{lowercase_model_name}.py''' , ) shutil.move( F'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''' , ) else: os.remove(F'''{directory}/modeling_flax_{lowercase_model_name}.py''' ) os.remove(F'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' ) shutil.move( F'''{directory}/{lowercase_model_name}.md''' , F'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''' , ) shutil.move( F'''{directory}/tokenization_{lowercase_model_name}.py''' , F'''{model_dir}/tokenization_{lowercase_model_name}.py''' , ) shutil.move( F'''{directory}/tokenization_fast_{lowercase_model_name}.py''' , F'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''' , ) from os import fdopen, remove from shutil import copymode, move from tempfile import mkstemp def replace(UpperCamelCase__: str , UpperCamelCase__: str , UpperCamelCase__: List[str] ): # Create temp file lowerCamelCase__ , lowerCamelCase__ : Any = mkstemp() lowerCamelCase__ : Tuple = False with fdopen(UpperCamelCase__ , """w""" ) as new_file: with open(UpperCamelCase__ ) as old_file: for line in old_file: new_file.write(UpperCamelCase__ ) if line_to_copy_below in line: lowerCamelCase__ : int = True for line_to_copy in lines_to_copy: new_file.write(UpperCamelCase__ ) if not line_found: raise ValueError(F'''Line {line_to_copy_below} was not found in file.''' ) # Copy the file permissions from the old file to the new file copymode(UpperCamelCase__ , UpperCamelCase__ ) # Remove original file remove(UpperCamelCase__ ) # Move new file move(UpperCamelCase__ , UpperCamelCase__ ) def skip_units(UpperCamelCase__: Optional[int] ): return ( ("generating PyTorch" in line and not output_pytorch) or ("generating TensorFlow" in line and not output_tensorflow) or ("generating Flax" in line and not output_flax) ) def replace_in_files(UpperCamelCase__: List[str] ): with open(UpperCamelCase__ ) as datafile: lowerCamelCase__ : int = [] lowerCamelCase__ : Tuple = False lowerCamelCase__ : int = False for line in 
datafile: if "# To replace in: " in line and "##" not in line: lowerCamelCase__ : List[str] = line.split("""\"""" )[1] lowerCamelCase__ : List[str] = skip_units(UpperCamelCase__ ) elif "# Below: " in line and "##" not in line: lowerCamelCase__ : List[Any] = line.split("""\"""" )[1] lowerCamelCase__ : Union[str, Any] = skip_units(UpperCamelCase__ ) elif "# End." in line and "##" not in line: if not skip_file and not skip_snippet: replace(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = [] elif "# Replace with" in line and "##" not in line: lowerCamelCase__ : str = [] elif "##" not in line: lines_to_copy.append(UpperCamelCase__ ) remove(UpperCamelCase__ ) replace_in_files(F'''{directory}/to_replace_{lowercase_model_name}.py''' ) os.rmdir(UpperCamelCase__ )
129
0
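The Hamming routine above merges the 2-, 3- and 5-multiplied streams with three read pointers, so each appended element is the smallest unseen 5-smooth number. A quick check of the first terms:

# First ten 5-smooth numbers: every element has only 2, 3 and 5 as prime factors.
assert hamming(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]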
'''simple docstring''' from ...configuration_utils import PretrainedConfig a__ : Dict = { 'google/tapas-base-finetuned-sqa': ( 'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json' ), 'google/tapas-base-finetuned-wtq': ( 'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json' ), 'google/tapas-base-finetuned-wikisql-supervised': ( 'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json' ), 'google/tapas-base-finetuned-tabfact': ( 'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json' ), } class lowercase_ ( a__ ): __UpperCAmelCase = 'tapas' def __init__( self , a=3_05_22 , a=7_68 , a=12 , a=12 , a=30_72 , a="gelu" , a=0.1 , a=0.1 , a=10_24 , a=[3, 2_56, 2_56, 2, 2_56, 2_56, 10] , a=0.02 , a=1e-12 , a=0 , a=10.0 , a=0 , a=1.0 , a=None , a=1.0 , a=False , a=None , a=1.0 , a=1.0 , a=False , a=False , a="ratio" , a=None , a=None , a=64 , a=32 , a=False , a=True , a=False , a=False , a=True , a=False , a=None , a=None , **a , ): super().__init__(pad_token_id=a , **a ) # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes) UpperCamelCase__ = vocab_size UpperCamelCase__ = hidden_size UpperCamelCase__ = num_hidden_layers UpperCamelCase__ = num_attention_heads UpperCamelCase__ = hidden_act UpperCamelCase__ = intermediate_size UpperCamelCase__ = hidden_dropout_prob UpperCamelCase__ = attention_probs_dropout_prob UpperCamelCase__ = max_position_embeddings UpperCamelCase__ = type_vocab_sizes UpperCamelCase__ = initializer_range UpperCamelCase__ = layer_norm_eps # Fine-tuning task hyperparameters UpperCamelCase__ = positive_label_weight UpperCamelCase__ = num_aggregation_labels UpperCamelCase__ = aggregation_loss_weight UpperCamelCase__ = use_answer_as_supervision UpperCamelCase__ = answer_loss_importance UpperCamelCase__ = use_normalized_answer_loss UpperCamelCase__ = huber_loss_delta UpperCamelCase__ = temperature UpperCamelCase__ = aggregation_temperature UpperCamelCase__ = use_gumbel_for_cells UpperCamelCase__ = use_gumbel_for_aggregation UpperCamelCase__ = average_approximation_function UpperCamelCase__ = cell_selection_preference UpperCamelCase__ = answer_loss_cutoff UpperCamelCase__ = max_num_rows UpperCamelCase__ = max_num_columns UpperCamelCase__ = average_logits_per_cell UpperCamelCase__ = select_one_column UpperCamelCase__ = allow_empty_column_selection UpperCamelCase__ = init_cell_selection_weights_to_zero UpperCamelCase__ = reset_position_index_per_cell UpperCamelCase__ = disable_per_token_loss # Aggregation hyperparameters UpperCamelCase__ = aggregation_labels UpperCamelCase__ = no_aggregation_label_index if isinstance(self.aggregation_labels , a ): UpperCamelCase__ = {int(a ): v for k, v in aggregation_labels.items()}
80
"""simple docstring""" import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class __snake_case ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = 42 _lowerCamelCase = jnp.floataa _lowerCamelCase = True def UpperCamelCase__( self ): '''simple docstring''' super().setup() __A : List[Any] = nn.Dense(5 , dtype=self.dtype ) def __call__( self , *__lowerCamelCase , **__lowerCamelCase ): '''simple docstring''' __A : Tuple = super().__call__(*__lowerCamelCase , **__lowerCamelCase ) __A : Optional[int] = self.cls(outputs[2] ) return outputs[:2] + (cls_out,) class __snake_case ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = FlaxBigBirdForNaturalQuestionsModule def __lowercase ( snake_case_ : Tuple ,snake_case_ : Optional[int] ,snake_case_ : int ,snake_case_ : Tuple ,snake_case_ : Any ,snake_case_ : Any ) ->List[Any]: '''simple docstring''' def cross_entropy(snake_case_ : str ,snake_case_ : Optional[Any] ,snake_case_ : Tuple=None ): __A : Dict = logits.shape[-1] __A : Dict = (labels[..., None] == jnp.arange(snake_case_ )[None]).astype('''f4''' ) __A : int = jax.nn.log_softmax(snake_case_ ,axis=-1 ) __A : Optional[int] = -jnp.sum(labels * logits ,axis=-1 ) if reduction is not None: __A : Optional[int] = reduction(snake_case_ ) return loss __A : str = partial(snake_case_ ,reduction=jnp.mean ) __A : Dict = cross_entropy(snake_case_ ,snake_case_ ) __A : List[str] = cross_entropy(snake_case_ ,snake_case_ ) __A : str = cross_entropy(snake_case_ ,snake_case_ ) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class __snake_case : """simple docstring""" _lowerCamelCase = "google/bigbird-roberta-base" _lowerCamelCase = 30_00 _lowerCamelCase = 1_05_00 _lowerCamelCase = 1_28 _lowerCamelCase = 3 _lowerCamelCase = 1 _lowerCamelCase = 5 # tx_args _lowerCamelCase = 3e-5 _lowerCamelCase = 0.0 _lowerCamelCase = 2_00_00 _lowerCamelCase = 0.0_0_9_5 _lowerCamelCase = "bigbird-roberta-natural-questions" _lowerCamelCase = "training-expt" _lowerCamelCase = "data/nq-training.jsonl" _lowerCamelCase = "data/nq-validation.jsonl" def UpperCamelCase__( self ): '''simple docstring''' os.makedirs(self.base_dir , exist_ok=__lowerCamelCase ) __A : Dict = os.path.join(self.base_dir , self.save_dir ) __A : Dict = self.batch_size_per_device * jax.device_count() @dataclass class __snake_case : """simple docstring""" _lowerCamelCase = 42 _lowerCamelCase = 40_96 # no dynamic padding on TPUs def __call__( self , __lowerCamelCase ): '''simple docstring''' __A : Optional[int] = self.collate_fn(__lowerCamelCase ) __A : Tuple = jax.tree_util.tree_map(__lowerCamelCase , __lowerCamelCase ) return batch def UpperCamelCase__( self , __lowerCamelCase ): '''simple docstring''' __A , __A : List[Any] = self.fetch_inputs(features['''input_ids'''] ) __A : Union[str, Any] = { '''input_ids''': jnp.array(__lowerCamelCase , dtype=jnp.intaa ), '''attention_mask''': jnp.array(__lowerCamelCase , dtype=jnp.intaa ), '''start_labels''': jnp.array(features['''start_token'''] , 
dtype=jnp.intaa ), '''end_labels''': jnp.array(features['''end_token'''] , dtype=jnp.intaa ), '''pooled_labels''': jnp.array(features['''category'''] , dtype=jnp.intaa ), } return batch def UpperCamelCase__( self , __lowerCamelCase ): '''simple docstring''' __A : Any = [self._fetch_inputs(__lowerCamelCase ) for ids in input_ids] return zip(*__lowerCamelCase ) def UpperCamelCase__( self , __lowerCamelCase ): '''simple docstring''' __A : Any = [1 for _ in range(len(__lowerCamelCase ) )] while len(__lowerCamelCase ) < self.max_length: input_ids.append(self.pad_id ) attention_mask.append(0 ) return input_ids, attention_mask def __lowercase ( snake_case_ : List[Any] ,snake_case_ : Optional[Any] ,snake_case_ : List[str]=None ) ->Optional[int]: '''simple docstring''' if seed is not None: __A : List[Any] = dataset.shuffle(seed=snake_case_ ) for i in range(len(snake_case_ ) // batch_size ): __A : Tuple = dataset[i * batch_size : (i + 1) * batch_size] yield dict(snake_case_ ) @partial(jax.pmap ,axis_name='''batch''' ) def __lowercase ( snake_case_ : str ,snake_case_ : Union[str, Any] ,**snake_case_ : List[str] ) ->Tuple: '''simple docstring''' def loss_fn(snake_case_ : List[str] ): __A : str = model_inputs.pop('''start_labels''' ) __A : str = model_inputs.pop('''end_labels''' ) __A : int = model_inputs.pop('''pooled_labels''' ) __A : Dict = state.apply_fn(**snake_case_ ,params=snake_case_ ,dropout_rng=snake_case_ ,train=snake_case_ ) __A , __A , __A : Union[str, Any] = outputs return state.loss_fn( snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,) __A , __A : int = jax.random.split(snake_case_ ) __A : str = jax.value_and_grad(snake_case_ ) __A , __A : Optional[int] = grad_fn(state.params ) __A : List[str] = jax.lax.pmean({'''loss''': loss} ,axis_name='''batch''' ) __A : List[str] = jax.lax.pmean(snake_case_ ,'''batch''' ) __A : str = state.apply_gradients(grads=snake_case_ ) return state, metrics, new_drp_rng @partial(jax.pmap ,axis_name='''batch''' ) def __lowercase ( snake_case_ : int ,**snake_case_ : Union[str, Any] ) ->List[str]: '''simple docstring''' __A : Tuple = model_inputs.pop('''start_labels''' ) __A : Dict = model_inputs.pop('''end_labels''' ) __A : int = model_inputs.pop('''pooled_labels''' ) __A : List[str] = state.apply_fn(**snake_case_ ,params=state.params ,train=snake_case_ ) __A , __A , __A : Dict = outputs __A : Optional[int] = state.loss_fn(snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ) __A : List[str] = jax.lax.pmean({'''loss''': loss} ,axis_name='''batch''' ) return metrics class __snake_case ( train_state.TrainState ): """simple docstring""" _lowerCamelCase = struct.field(pytree_node=SCREAMING_SNAKE_CASE__ ) @dataclass class __snake_case : """simple docstring""" _lowerCamelCase = 42 _lowerCamelCase = 42 _lowerCamelCase = 42 _lowerCamelCase = 42 _lowerCamelCase = 42 _lowerCamelCase = 42 _lowerCamelCase = None def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None ): '''simple docstring''' __A : Tuple = model.params __A : Union[str, Any] = TrainState.create( apply_fn=model.__call__ , params=__lowerCamelCase , tx=__lowerCamelCase , loss_fn=__lowerCamelCase , ) if ckpt_dir is not None: __A , __A , __A , __A , __A : Optional[Any] = restore_checkpoint(__lowerCamelCase , __lowerCamelCase ) __A : List[Any] = { '''lr''': args.lr, '''init_lr''': args.init_lr, '''warmup_steps''': args.warmup_steps, '''num_train_steps''': num_train_steps, '''weight_decay''': 
args.weight_decay, } __A , __A : List[str] = build_tx(**__lowerCamelCase ) __A : int = train_state.TrainState( step=__lowerCamelCase , apply_fn=model.__call__ , params=__lowerCamelCase , tx=__lowerCamelCase , opt_state=__lowerCamelCase , ) __A : int = args __A : Optional[Any] = data_collator __A : Tuple = lr __A : List[Any] = params __A : Dict = jax_utils.replicate(__lowerCamelCase ) return state def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ): '''simple docstring''' __A : List[Any] = self.args __A : Dict = len(__lowerCamelCase ) // args.batch_size __A : List[Any] = jax.random.PRNGKey(0 ) __A : Optional[Any] = jax.random.split(__lowerCamelCase , jax.device_count() ) for epoch in range(args.max_epochs ): __A : Tuple = jnp.array(0 , dtype=jnp.floataa ) __A : Optional[Any] = get_batched_dataset(__lowerCamelCase , args.batch_size , seed=__lowerCamelCase ) __A : Union[str, Any] = 0 for batch in tqdm(__lowerCamelCase , total=__lowerCamelCase , desc=F"""Running EPOCH-{epoch}""" ): __A : Optional[Any] = self.data_collator(__lowerCamelCase ) __A , __A , __A : Union[str, Any] = self.train_step_fn(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ) running_loss += jax_utils.unreplicate(metrics['''loss'''] ) i += 1 if i % args.logging_steps == 0: __A : Union[str, Any] = jax_utils.unreplicate(state.step ) __A : Optional[int] = running_loss.item() / i __A : List[Any] = self.scheduler_fn(state_step - 1 ) __A : Union[str, Any] = self.evaluate(__lowerCamelCase , __lowerCamelCase ) __A : Optional[Any] = { '''step''': state_step.item(), '''eval_loss''': eval_loss.item(), '''tr_loss''': tr_loss, '''lr''': lr.item(), } tqdm.write(str(__lowerCamelCase ) ) self.logger.log(__lowerCamelCase , commit=__lowerCamelCase ) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + F"""-e{epoch}-s{i}""" , state=__lowerCamelCase ) def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase ): '''simple docstring''' __A : Union[str, Any] = get_batched_dataset(__lowerCamelCase , self.args.batch_size ) __A : int = len(__lowerCamelCase ) // self.args.batch_size __A : Optional[Any] = jnp.array(0 , dtype=jnp.floataa ) __A : Dict = 0 for batch in tqdm(__lowerCamelCase , total=__lowerCamelCase , desc='''Evaluating ... ''' ): __A : List[str] = self.data_collator(__lowerCamelCase ) __A : Union[str, Any] = self.val_step_fn(__lowerCamelCase , **__lowerCamelCase ) running_loss += jax_utils.unreplicate(metrics['''loss'''] ) i += 1 return running_loss / i def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase ): '''simple docstring''' __A : Dict = jax_utils.unreplicate(__lowerCamelCase ) print(F"""SAVING CHECKPOINT IN {save_dir}""" , end=''' ... ''' ) self.model_save_fn(__lowerCamelCase , params=state.params ) with open(os.path.join(__lowerCamelCase , '''opt_state.msgpack''' ) , '''wb''' ) as f: f.write(to_bytes(state.opt_state ) ) joblib.dump(self.args , os.path.join(__lowerCamelCase , '''args.joblib''' ) ) joblib.dump(self.data_collator , os.path.join(__lowerCamelCase , '''data_collator.joblib''' ) ) with open(os.path.join(__lowerCamelCase , '''training_state.json''' ) , '''w''' ) as f: json.dump({'''step''': state.step.item()} , __lowerCamelCase ) print('''DONE''' ) def __lowercase ( snake_case_ : int ,snake_case_ : Dict ) ->Optional[int]: '''simple docstring''' print(F"""RESTORING CHECKPOINT FROM {save_dir}""" ,end=''' ... 
''' ) with open(os.path.join(snake_case_ ,'''flax_model.msgpack''' ) ,'''rb''' ) as f: __A : List[Any] = from_bytes(state.params ,f.read() ) with open(os.path.join(snake_case_ ,'''opt_state.msgpack''' ) ,'''rb''' ) as f: __A : Optional[int] = from_bytes(state.opt_state ,f.read() ) __A : Tuple = joblib.load(os.path.join(snake_case_ ,'''args.joblib''' ) ) __A : List[str] = joblib.load(os.path.join(snake_case_ ,'''data_collator.joblib''' ) ) with open(os.path.join(snake_case_ ,'''training_state.json''' ) ,'''r''' ) as f: __A : Dict = json.load(snake_case_ ) __A : int = training_state['''step'''] print('''DONE''' ) return params, opt_state, step, args, data_collator def __lowercase ( snake_case_ : List[str] ,snake_case_ : Any ,snake_case_ : Dict ,snake_case_ : str ) ->List[str]: '''simple docstring''' __A : str = num_train_steps - warmup_steps __A : Union[str, Any] = optax.linear_schedule(init_value=snake_case_ ,end_value=snake_case_ ,transition_steps=snake_case_ ) __A : Optional[Any] = optax.linear_schedule(init_value=snake_case_ ,end_value=1e-7 ,transition_steps=snake_case_ ) __A : Any = optax.join_schedules(schedules=[warmup_fn, decay_fn] ,boundaries=[warmup_steps] ) return lr def __lowercase ( snake_case_ : List[str] ,snake_case_ : List[Any] ,snake_case_ : Union[str, Any] ,snake_case_ : List[Any] ,snake_case_ : str ) ->List[str]: '''simple docstring''' def weight_decay_mask(snake_case_ : List[Any] ): __A : List[Any] = traverse_util.flatten_dict(snake_case_ ) __A : int = {k: (v[-1] != '''bias''' and v[-2:] != ('''LayerNorm''', '''scale''')) for k, v in params.items()} return traverse_util.unflatten_dict(snake_case_ ) __A : List[Any] = scheduler_fn(snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ) __A : List[str] = optax.adamw(learning_rate=snake_case_ ,weight_decay=snake_case_ ,mask=snake_case_ ) return tx, lr
179
0
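The TAPAS config above carries the fine-tuning hyperparameters alongside the BERT backbone settings. A short sketch of the documented WTQ-style setup (weak supervision with four aggregation operators), assuming only a standard transformers install:

from transformers import TapasConfig, TapasForQuestionAnswering

# NONE, SUM, AVERAGE, COUNT -> num_aggregation_labels=4, with answers supervising aggregation.
config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)
model = TapasForQuestionAnswering(config)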
from __future__ import annotations

import random

# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #         max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
62
from string import ascii_uppercase

ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")

    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])

    return new_value[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for base in range(2, 37):
        for num in range(1000):
            assert int(decimal_to_any(num, base), base) == num, (
                num,
                base,
                decimal_to_any(num, base),
                int(decimal_to_any(num, base), base),
            )
62
1
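A few worked conversions for the base-conversion routine above; digits past 9 come from the ALPHABET_VALUES lookup, so for example 15 maps to "F":

assert decimal_to_any(255, 16) == "FF"       # 15 -> "F" via ALPHABET_VALUES["15"]
assert decimal_to_any(255, 2) == "11111111"
assert decimal_to_any(100, 36) == "2S"       # 100 = 2 * 36 + 28, and 28 -> "S"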
import inspect
import os
import unittest
from dataclasses import dataclass

import torch

from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler


@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
117
import os
from datetime import datetime as dt

from github import Github

LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(
            [comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True
        )
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
117
1
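The test above checks accelerate's kwargs-handler plumbing; the user-facing pattern it validates is simply passing handler dataclasses into the Accelerator:

from accelerate import Accelerator, DistributedDataParallelKwargs

# Non-default fields are forwarded to torch's DistributedDataParallel wrapper.
ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True, bucket_cap_mb=15)
accelerator = Accelerator(kwargs_handlers=[ddp_kwargs])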
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}


class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
369
import warnings
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
29
0
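The `d_model % n_head` guard in the XLNetConfig above can be exercised directly; a minimal sketch, assuming `transformers` is installed and exposes `XLNetConfig` as usual:

from transformers import XLNetConfig

config = XLNetConfig(d_model=1024, n_head=16)
assert config.d_head == 1024 // 16  # derived per-head size, 64

try:
    XLNetConfig(d_model=1000, n_head=16)  # 1000 % 16 != 0, so this must raise
except ValueError as err:
    print(err)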
"""simple docstring""" from __future__ import annotations import csv import requests from bsa import BeautifulSoup def lowercase ( __snake_case : str = "" ): lowercase_ : str = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250" lowercase_ : int = BeautifulSoup(requests.get(_lowerCamelCase ).text , '''html.parser''' ) lowercase_ : int = soup.find_all('''td''' , attrs='''titleColumn''' ) lowercase_ : Any = soup.find_all('''td''' , class_='''ratingColumn imdbRating''' ) return { title.a.text: float(rating.strong.text ) for title, rating in zip(_lowerCamelCase , _lowerCamelCase ) } def lowercase ( __snake_case : str = "IMDb_Top_250_Movies.csv" ): lowercase_ : Dict = get_imdb_top_aaa_movies() with open(_lowerCamelCase , '''w''' , newline='''''' ) as out_file: lowercase_ : Optional[int] = csv.writer(_lowerCamelCase ) writer.writerow(['''Movie title''', '''IMDb rating'''] ) for title, rating in movies.items(): writer.writerow([title, rating] ) if __name__ == "__main__": write_movies()
33
import json
import os
import unittest

from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LEDTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
87
0
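For reference, a stdlib-only sketch of the CSV layout the IMDb `write_movies` helper above produces, using made-up titles and ratings instead of a live scrape:

import csv
import io

movies = {"Movie A": 9.3, "Movie B": 9.2}  # illustrative values, not real scrape results
buffer = io.StringIO()
writer = csv.writer(buffer)
writer.writerow(["Movie title", "IMDb rating"])
for title, rating in movies.items():
    writer.writerow([title, rating])

print(buffer.getvalue())
# Movie title,IMDb rating
# Movie A,9.3
# Movie B,9.2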
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import GLPNImageProcessor class __UpperCamelCase ( unittest.TestCase ): def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=3 , lowerCAmelCase__=18 , lowerCAmelCase__=30 , lowerCAmelCase__=400 , lowerCAmelCase__=True , lowerCAmelCase__=32 , lowerCAmelCase__=True , ) -> Optional[Any]: a : str = parent a : str = batch_size a : Union[str, Any] = num_channels a : Optional[Any] = image_size a : int = min_resolution a : Optional[int] = max_resolution a : List[Any] = do_resize a : List[Any] = size_divisor a : Tuple = do_rescale def __a ( self ) -> Optional[int]: return { "do_resize": self.do_resize, "size_divisor": self.size_divisor, "do_rescale": self.do_rescale, } @require_torch @require_vision class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : Dict =GLPNImageProcessor if is_vision_available() else None def __a ( self ) -> int: a : Any = GLPNImageProcessingTester(self ) @property def __a ( self ) -> Tuple: return self.image_processor_tester.prepare_image_processor_dict() def __a ( self ) -> str: a : List[str] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize" ) ) self.assertTrue(hasattr(lowerCAmelCase__ , "size_divisor" ) ) self.assertTrue(hasattr(lowerCAmelCase__ , "resample" ) ) self.assertTrue(hasattr(lowerCAmelCase__ , "do_rescale" ) ) def __a ( self ) -> List[str]: pass def __a ( self ) -> Union[str, Any]: # Initialize image_processing a : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PIL images a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , Image.Image ) # Test not batched input (GLPNImageProcessor doesn't support batching) a : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def __a ( self ) -> str: # Initialize image_processing a : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors a : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , np.ndarray ) # Test not batched input (GLPNImageProcessor doesn't support batching) a : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def __a ( self ) -> int: # Initialize image_processing a : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors a : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , 
torch.Tensor ) # Test not batched input (GLPNImageProcessor doesn't support batching) a : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
352
"""simple docstring""" import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def _SCREAMING_SNAKE_CASE ( _lowercase : List[Any] ) ->Union[str, Any]: '''simple docstring''' if isinstance(_lowercase , collections.abc.Iterable ): return x return (x, x) @require_flax class __UpperCamelCase : def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> str: pass def __a ( self ) -> List[Any]: pass def __a ( self ) -> str: pass def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any: a : Dict = np.abs((a - b) ).max() self.assertLessEqual(lowerCAmelCase__ , lowerCAmelCase__ , f"""Difference between torch and flax is {diff} (>= {tol}).""" ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Dict: a : Dict = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ ) a : List[str] = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ ) a : int = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Optional[Any]: a, a : Optional[int] = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ ) a : Dict = {"vision_model": vision_model, "text_model": text_model} a : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ ) a : List[str] = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Union[str, Any]: a, a : Dict = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ ) a : Tuple = {"vision_model": vision_model, "text_model": text_model} a : Tuple = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ ) a : List[str] = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ) a : Any = output[0] with tempfile.TemporaryDirectory() as 
tmpdirname: model.save_pretrained(lowerCAmelCase__ ) a : str = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ ) a : Dict = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ) a : List[Any] = after_output[0] a : Optional[Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCAmelCase__ , 1E-3 ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> List[Any]: a, a : Union[str, Any] = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ ) a : List[Any] = {"vision_model": vision_model, "text_model": text_model} a : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ ) a : Tuple = model( input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , output_attentions=lowerCAmelCase__ ) a : int = output.vision_model_output.attentions self.assertEqual(len(lowerCAmelCase__ ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) a : Optional[int] = to_atuple(vision_model.config.image_size ) a : Tuple = to_atuple(vision_model.config.patch_size ) a : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) a : Dict = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) a : str = output.text_model_output.attentions self.assertEqual(len(lowerCAmelCase__ ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]: pt_model.to(lowerCAmelCase__ ) pt_model.eval() # prepare inputs a : List[Any] = inputs_dict a : Any = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): a : int = pt_model(**lowerCAmelCase__ ).to_tuple() a : Union[str, Any] = fx_model(**lowerCAmelCase__ ).to_tuple() self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , "Output lengths differ between Flax and PyTorch" ) for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ): self.assert_almost_equals(lowerCAmelCase__ , pt_output.numpy() , 4E-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowerCAmelCase__ ) a : Dict = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ , from_pt=lowerCAmelCase__ ) a : Optional[int] = fx_model_loaded(**lowerCAmelCase__ ).to_tuple() self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , "Output lengths differ between Flax and PyTorch" ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ): self.assert_almost_equals(lowerCAmelCase__ , pt_output.numpy() , 4E-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowerCAmelCase__ ) a : Optional[int] = VisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ , from_flax=lowerCAmelCase__ ) pt_model_loaded.to(lowerCAmelCase__ ) pt_model_loaded.eval() with torch.no_grad(): a : int = pt_model_loaded(**lowerCAmelCase__ ).to_tuple() self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , "Output lengths differ between Flax and PyTorch" ) for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ): self.assert_almost_equals(lowerCAmelCase__ , pt_output_loaded.numpy() , 4E-2 
) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]: a : List[Any] = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ ) a : Dict = VisionTextDualEncoderModel(lowerCAmelCase__ ) a : Optional[int] = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ ) a : Dict = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCAmelCase__ ) a : List[str] = fx_state self.check_pt_flax_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]: a : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ ) a : Optional[int] = VisionTextDualEncoderModel(lowerCAmelCase__ ) a : List[Any] = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ ) a : int = load_flax_weights_in_pytorch_model(lowerCAmelCase__ , fx_model.params ) self.check_pt_flax_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def __a ( self ) -> Dict: a : Any = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**lowerCAmelCase__ ) def __a ( self ) -> Dict: a : List[str] = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**lowerCAmelCase__ ) def __a ( self ) -> List[str]: a : int = self.prepare_config_and_inputs() self.check_save_load(**lowerCAmelCase__ ) def __a ( self ) -> List[str]: a : Tuple = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**lowerCAmelCase__ ) @is_pt_flax_cross_test def __a ( self ) -> Any: a : List[Any] = self.prepare_config_and_inputs() a : Tuple = config_inputs_dict.pop("vision_config" ) a : int = config_inputs_dict.pop("text_config" ) a : List[str] = config_inputs_dict self.check_equivalence_pt_to_flax(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) self.check_equivalence_flax_to_pt(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) @slow def __a ( self ) -> List[Any]: a, a : Optional[int] = self.get_pretrained_model_and_inputs() a : Optional[int] = model_a(**lowerCAmelCase__ ) a : Optional[int] = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(lowerCAmelCase__ ) a : Any = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ ) a : str = model_a(**lowerCAmelCase__ ) a : Dict = after_outputs[0] a : Optional[Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCAmelCase__ , 1E-5 ) @require_flax class __UpperCamelCase ( a__ , unittest.TestCase ): def __a ( self ) -> List[Any]: a : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-bert" , vision_from_pt=lowerCAmelCase__ , text_from_pt=lowerCAmelCase__ , ) a : Any = 13 a : str = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) a : str = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) a : Optional[Any] = random_attention_mask([batch_size, 4] ) a : Optional[Any] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict: a : Dict = FlaxViTModel(lowerCAmelCase__ ) a : Dict = FlaxBertModel(lowerCAmelCase__ ) return vision_model, text_model def __a ( self ) -> str: a : Union[str, Any] = FlaxViTModelTester(self ) a : Dict = FlaxBertModelTester(self ) a : str = 
vit_model_tester.prepare_config_and_inputs() a : Any = bert_model_tester.prepare_config_and_inputs() a, a : Optional[int] = vision_config_and_inputs a, a, a, a : Dict = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class __UpperCamelCase ( a__ , unittest.TestCase ): def __a ( self ) -> List[Any]: a : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-clip" , "hf-internal-testing/tiny-bert" , vision_from_pt=lowerCAmelCase__ , text_from_pt=lowerCAmelCase__ , ) a : Tuple = 13 a : Union[str, Any] = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) a : Union[str, Any] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) a : Tuple = random_attention_mask([batch_size, 4] ) a : str = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple: a : List[Any] = FlaxCLIPVisionModel(lowerCAmelCase__ ) a : Tuple = FlaxBertModel(lowerCAmelCase__ ) return vision_model, text_model def __a ( self ) -> List[Any]: a : Tuple = FlaxCLIPVisionModelTester(self ) a : Union[str, Any] = FlaxBertModelTester(self ) a : Dict = clip_model_tester.prepare_config_and_inputs() a : Optional[int] = bert_model_tester.prepare_config_and_inputs() a, a : Dict = vision_config_and_inputs a, a, a, a : Union[str, Any] = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class __UpperCamelCase ( unittest.TestCase ): @slow def __a ( self ) -> Dict: a : str = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian" , logit_scale_init_value=1.0 ) a : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" ) a : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) a : Optional[int] = processor( text=["una foto di un gatto", "una foto di un cane"] , images=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="np" ) a : Optional[Any] = model(**lowerCAmelCase__ ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) a : List[str] = np.array([[1.2_284_727, 0.3_104_122]] ) self.assertTrue(np.allclose(outputs.logits_per_image , lowerCAmelCase__ , atol=1E-3 ) )
79
0
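The divisibility assertions in the GLPN image-processor tests above follow from rounding each spatial dimension down to a multiple of `size_divisor`. A stdlib-only sketch of that rounding (the input sizes are made up, and the flooring behaviour is an assumption about GLPN-style resizing):

size_divisor = 32

def round_down(dim: int, divisor: int) -> int:
    # Keep the largest multiple of `divisor` that does not exceed `dim`.
    return (dim // divisor) * divisor

for height, width in [(400, 640), (301, 299), (32, 32)]:
    new_h, new_w = round_down(height, size_divisor), round_down(width, size_divisor)
    assert new_h % size_divisor == 0 and new_w % size_divisor == 0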
import json
import os
import shutil
import tempfile
import unittest

from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained("google/canine-s")

    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        tokenizer._unicode_vocab_size = 1024
        return tokenizer

    @require_torch
    def test_prepare_batch_integration(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)

        result = list(batch.input_ids.numpy()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)

    @require_torch
    def test_encoding_keys(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertIn("token_type_ids", batch)

    @require_torch
    def test_max_length_integration(self):
        tokenizer = self.canine_tokenizer
        tgt_text = [
            "What's the weater?",
            "It's about 25 degrees.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors="pt"
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"

                additional_special_tokens = tokenizer.additional_special_tokens

                # We can add a new special token for Canine as follows:
                new_additional_special_token = chr(0xE007)
                additional_special_tokens.append(new_additional_special_token)
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn(new_additional_special_token, after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, ids = self.get_clean_sequence(tokenizer)

                # a special token for Canine can be defined as follows:
                SPECIAL_TOKEN = 0xE005
                special_token = chr(SPECIAL_TOKEN)

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                text = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=False)
                encoded = tokenizer.encode(text, add_special_tokens=False)

                input_encoded = tokenizer.encode(input_text, add_special_tokens=False)
                special_token_id = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(encoded, input_encoded + special_token_id)

                decoded = tokenizer.decode(encoded, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_tokenize_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                SPECIAL_TOKEN_1 = chr(0xE005)
                SPECIAL_TOKEN_2 = chr(0xE006)

                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True)
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]})

                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)

                self.assertEqual(len(token_1), 1)
                self.assertEqual(len(token_2), 1)
                self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
                self.assertEqual(token_2[0], SPECIAL_TOKEN_2)

    @require_tokenizers
    def test_added_token_serializable(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token = chr(NEW_TOKEN)

                new_token = AddedToken(new_token, lstrip=True)
                tokenizer.add_special_tokens({"additional_special_tokens": [new_token]})

                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(tmp_dir_name)
                    tokenizer.from_pretrained(tmp_dir_name)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token_1 = chr(NEW_TOKEN)

                special_tokens_map["additional_special_tokens"] = [new_token_1]
                tokenizer_config["additional_special_tokens"] = [new_token_1]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir, extra_ids=0)
                self.assertIn(new_token_1, tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_1],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_1])
                    ),
                )

                NEW_TOKEN = 0xE007
                new_token_2 = chr(NEW_TOKEN)
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = [AddedToken(new_token_2, lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, extra_ids=0
                )

                self.assertIn(new_token_2, tokenizer.additional_special_tokens)
                # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_2], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_2]))
                )

    @require_tokenizers
    def test_encode_decode_with_spaces(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input = "hello world"
                if self.space_between_special_tokens:
                    output = "[CLS] hello world [SEP]"
                else:
                    output = input
                encoded = tokenizer.encode(input, add_special_tokens=False)
                decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
                self.assertIn(decoded, [output, output.lower()])

    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_to_test_setters = "a"
                token_id_to_test_setters = ord(token_to_test_setters)

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                additional_special_token_id = 0xE006
                additional_special_token = chr(additional_special_token_id)
                setattr(tokenizer, "additional_special_tokens_ids", [additional_special_token_id])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [additional_special_token])
                self.assertListEqual(
                    getattr(tokenizer, "additional_special_tokens_ids"), [additional_special_token_id]
                )

    # the remaining common tests do not apply to Canine, which works directly on
    # Unicode code points and has no conventional vocabulary
    def test_add_tokens_tokenizer(self):
        pass

    def test_added_tokens_do_lower_case(self):
        pass

    def test_np_encode_plus_sent_to_model(self):
        pass

    def test_torch_encode_plus_sent_to_model(self):
        pass

    def test_pretrained_model_lists(self):
        pass

    def test_get_vocab(self):
        pass

    def test_pretokenized_inputs(self):
        pass

    def test_conversion_reversible(self):
        pass
111
import pytest


DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = """
import json
import os

import datasets


REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}


class __DummyDataset1__(datasets.GeneratorBasedBuilder):

    def _info(self):
        features = datasets.Features(
            {
                "tokens": datasets.Sequence(datasets.Value("string")),
                "ner_tags": datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=[
                            "O",
                            "B-PER",
                            "I-PER",
                            "B-ORG",
                            "I-ORG",
                            "B-LOC",
                            "I-LOC",
                        ]
                    )
                ),
                "langs": datasets.Sequence(datasets.Value("string")),
                "spans": datasets.Sequence(datasets.Value("string")),
            }
        )
        return datasets.DatasetInfo(features=features)

    def _split_generators(self, dl_manager):
        dl_path = dl_manager.download(URLS)
        return [
            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
        ]

    def _generate_examples(self, filepath):
        with open(filepath, "r", encoding="utf-8") as f:
            for i, line in enumerate(f):
                yield i, json.loads(line)
"""


@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
111
1
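A hedged sketch of how a test might consume the `dataset_loading_script_dir` fixture above; the test name is hypothetical, and it only checks what the fixture itself guarantees:

import os


def test_dummy_dataset_script_is_materialized(dataset_loading_script_dir, dataset_loading_script_name):
    # The fixture writes `<script_name>.py` inside the directory it returns.
    script_path = os.path.join(dataset_loading_script_dir, f"{dataset_loading_script_name}.py")
    assert os.path.isfile(script_path)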
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : Any = logging.get_logger(__name__) _lowerCAmelCase : str = { "facebook/data2vec-vision-base-ft": ( "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json" ), } class UpperCAmelCase_ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE : List[Any] = 'data2vec-vision' def __init__( self : Dict , A : int=7_6_8 , A : List[Any]=1_2 , A : List[str]=1_2 , A : Union[str, Any]=3_0_7_2 , A : Tuple="gelu" , A : List[Any]=0.0 , A : List[Any]=0.0 , A : Optional[Any]=0.02 , A : Any=1e-12 , A : Union[str, Any]=2_2_4 , A : Union[str, Any]=1_6 , A : Union[str, Any]=3 , A : Tuple=False , A : List[Any]=False , A : str=False , A : Optional[Any]=False , A : List[str]=0.1 , A : List[Any]=0.1 , A : Tuple=True , A : Optional[Any]=[3, 5, 7, 1_1] , A : Any=[1, 2, 3, 6] , A : int=True , A : Dict=0.4 , A : Any=2_5_6 , A : List[str]=1 , A : str=False , A : Optional[Any]=2_5_5 , **A : Optional[int] , ): super().__init__(**A ) _UpperCAmelCase : str = hidden_size _UpperCAmelCase : Optional[Any] = num_hidden_layers _UpperCAmelCase : Dict = num_attention_heads _UpperCAmelCase : Optional[Any] = intermediate_size _UpperCAmelCase : List[str] = hidden_act _UpperCAmelCase : str = hidden_dropout_prob _UpperCAmelCase : List[Any] = attention_probs_dropout_prob _UpperCAmelCase : Union[str, Any] = initializer_range _UpperCAmelCase : int = layer_norm_eps _UpperCAmelCase : Optional[int] = image_size _UpperCAmelCase : Optional[Any] = patch_size _UpperCAmelCase : Dict = num_channels _UpperCAmelCase : Optional[Any] = use_mask_token _UpperCAmelCase : str = use_absolute_position_embeddings _UpperCAmelCase : Optional[int] = use_relative_position_bias _UpperCAmelCase : Optional[int] = use_shared_relative_position_bias _UpperCAmelCase : List[str] = layer_scale_init_value _UpperCAmelCase : Dict = drop_path_rate _UpperCAmelCase : Union[str, Any] = use_mean_pooling # decode head attributes (semantic segmentation) _UpperCAmelCase : Dict = out_indices _UpperCAmelCase : Optional[Any] = pool_scales # auxiliary head attributes (semantic segmentation) _UpperCAmelCase : Dict = use_auxiliary_head _UpperCAmelCase : Dict = auxiliary_loss_weight _UpperCAmelCase : str = auxiliary_channels _UpperCAmelCase : Dict = auxiliary_num_convs _UpperCAmelCase : Optional[int] = auxiliary_concat_input _UpperCAmelCase : Tuple = semantic_loss_ignore_index class UpperCAmelCase_ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE : Any = version.parse('1.11' ) @property def snake_case_ ( self : List[Any] ): return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def snake_case_ ( self : List[Any] ): return 1e-4
202
"""simple docstring""" from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo _lowerCAmelCase : Tuple = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n" _lowerCAmelCase : Optional[int] = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n" _lowerCAmelCase : Any = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 
'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 
'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase_ ( datasets.Metric ): def snake_case_ ( self : List[str] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ), } ) , ) def snake_case_ ( self : str , A : List[List[List[str]]] , A : List[List[str]] , A : int = 1 , A : int = 4 , ): return { "google_bleu": gleu_score.corpus_gleu( list_of_references=A , hypotheses=A , min_len=A , max_len=A ) }
202
1
import inspect
import unittest

from transformers import MobileNetV1Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileNetV1ForImageClassification, MobileNetV1Model
    from transformers.models.mobilenet_v1.modeling_mobilenet_v1 import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileNetV1ImageProcessor


class MobileNetV1ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))


class MobileNetV1ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV1Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV1Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV1ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileNetV1ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MobileNetV1Model, MobileNetV1ForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetV1Model, "image-classification": MobileNetV1ForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV1ModelTester(self)
        self.config_tester = MobileNetV1ConfigTester(self, config_class=MobileNetV1Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV1Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileNetV1ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
38
"""Longest common subsequence (LCS) via dynamic programming.

Returns both the length of the LCS of two strings and one such subsequence,
recovered by backtracking through the DP table.
"""


def longest_common_subsequence(x: str, y: str):
    assert x is not None
    assert y is not None
    m = len(x)
    n = len(y)

    # l[i][j] holds the LCS length of the prefixes x[:i] and y[:j]
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    # Backtrack from l[m][n] to recover one optimal subsequence.
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq


if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"
    ln, subseq = longest_common_subsequence(a, b)
    assert ln == expected_ln and subseq == expected_subseq
    print("len =", ln, ", sub-sequence =", subseq)

    import doctest

    doctest.testmod()
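    # A second, illustrative check (inputs chosen here for demonstration, not
    # taken from the original test data): the classic pair below has LCS
    # length 4; valid subsequences include "BCBA", "BCAB" and "BDAB", and
    # which one is returned depends on the tie-breaking order of the
    # backtracking loop above.
    ln2, subseq2 = longest_common_subsequence("ABCBDAB", "BDCABA")
    assert ln2 == 4
    print("len =", ln2, ", sub-sequence =", subseq2)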
174
0
import random
import unittest

import torch

from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
242
from collections import UserDict
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    """Zero-shot image classification: score an image against free-form candidate labels."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
242
1
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( HubertConfig, HubertForCTC, HubertModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() a : Optional[Any] = logging.get_logger(__name__) a : int = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : int , _lowercase : List[str] , _lowercase : List[str] , _lowercase : Dict ) ->Optional[Any]: '''simple docstring''' for attribute in key.split("." ): a : Dict = getattr(_lowercase , _lowercase ) if weight_type is not None: a : Optional[Any] = getattr(_lowercase , _lowercase ).shape else: a : Tuple = hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": a : List[str] = value elif weight_type == "weight_g": a : int = value elif weight_type == "weight_v": a : int = value elif weight_type == "bias": a : Tuple = value else: a : Union[str, Any] = value logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def _SCREAMING_SNAKE_CASE ( _lowercase : Any , _lowercase : List[str] , _lowercase : int ) ->Any: '''simple docstring''' a : Dict = [] a : int = fairseq_model.state_dict() a : Tuple = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): a : int = False if "conv_layers" in name: load_conv_layer( _lowercase , _lowercase , _lowercase , _lowercase , hf_model.config.feat_extract_norm == "group" , ) a : Optional[Any] = True else: for key, mapped_key in MAPPING.items(): a : Optional[Any] = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key if key in name or (key.split("w2v_model." )[-1] == name.split("." )[0] and not is_finetuned): a : List[str] = True if "*" in mapped_key: a : Union[str, Any] = name.split(_lowercase )[0].split("." 
)[-2] a : str = mapped_key.replace("*" , _lowercase ) if "weight_g" in name: a : Optional[int] = "weight_g" elif "weight_v" in name: a : Optional[Any] = "weight_v" elif "weight" in name: a : Tuple = "weight" elif "bias" in name: a : Tuple = "bias" else: a : Union[str, Any] = None set_recursively(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) continue if not is_used: unused_weights.append(_lowercase ) logger.warning(F"""Unused weights: {unused_weights}""" ) def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[int] , _lowercase : Optional[Any] , _lowercase : Optional[Any] , _lowercase : str , _lowercase : Optional[Any] ) ->List[Any]: '''simple docstring''' a : List[Any] = full_name.split("conv_layers." )[-1] a : Any = name.split("." ) a : List[str] = int(items[0] ) a : Any = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) a : Tuple = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) a : str = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) a : str = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) a : str = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_lowercase ) @torch.no_grad() def _SCREAMING_SNAKE_CASE ( _lowercase : Any , _lowercase : Tuple , _lowercase : List[str]=None , _lowercase : Dict=None , _lowercase : int=True ) ->List[Any]: '''simple docstring''' if config_path is not None: a : Tuple = HubertConfig.from_pretrained(_lowercase ) else: a : Any = HubertConfig() if is_finetuned: if dict_path: a : str = Dictionary.load(_lowercase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq a : int = target_dict.pad_index a : Optional[int] = target_dict.bos_index a : Dict = target_dict.eos_index a : Optional[int] = len(target_dict.symbols ) a : List[str] = os.path.join(_lowercase , "vocab.json" ) if not os.path.isdir(_lowercase ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowercase ) ) return os.makedirs(_lowercase , exist_ok=_lowercase ) with open(_lowercase , "w" , encoding="utf-8" ) as vocab_handle: json.dump(target_dict.indices , _lowercase ) a : Optional[int] = WavaVecaCTCTokenizer( _lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowercase , ) a : int = True if config.feat_extract_norm == "layer" else False a : Any = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=_lowercase , return_attention_mask=_lowercase , ) a : str = WavaVecaProcessor(feature_extractor=_lowercase , tokenizer=_lowercase ) processor.save_pretrained(_lowercase ) a : int = HubertForCTC(_lowercase ) else: a : str = HubertModel(_lowercase ) if is_finetuned: a, a, a : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: a, a, a : int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) a : Optional[int] = model[0].eval() recursively_load_weights(_lowercase , _lowercase , _lowercase ) hf_wavavec.save_pretrained(_lowercase ) if __name__ == "__main__": a : Tuple = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) a : Optional[Any] = parser.parse_args() convert_hubert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
105
"""simple docstring""" import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __magic_name__ = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class SCREAMING_SNAKE_CASE_ ( __a , unittest.TestCase ): """simple docstring""" __lowercase : Optional[Any] = XLNetTokenizer __lowercase : List[str] = XLNetTokenizerFast __lowercase : List[Any] = True __lowercase : int = True def snake_case_ ( self): super().setUp() # We have a SentencePiece fixture for testing __SCREAMING_SNAKE_CASE = XLNetTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname) def snake_case_ ( self): __SCREAMING_SNAKE_CASE = """<s>""" __SCREAMING_SNAKE_CASE = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__) , lowerCAmelCase__) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__) , lowerCAmelCase__) def snake_case_ ( self): __SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , """<unk>""") self.assertEqual(vocab_keys[1] , """<s>""") self.assertEqual(vocab_keys[-1] , """<eod>""") self.assertEqual(len(lowerCAmelCase__) , 1_0_0_6) def snake_case_ ( self): self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0) def snake_case_ ( self): __SCREAMING_SNAKE_CASE = XLNetTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__) __SCREAMING_SNAKE_CASE = tokenizer.tokenize("""This is a test""") self.assertListEqual(lowerCAmelCase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""]) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]) __SCREAMING_SNAKE_CASE = tokenizer.tokenize("""I was born in 92000, and this is falsé.""") self.assertListEqual( lowerCAmelCase__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) __SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4]) __SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(lowerCAmelCase__) self.assertListEqual( lowerCAmelCase__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def snake_case_ ( self): __SCREAMING_SNAKE_CASE = XLNetTokenizer(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__) __SCREAMING_SNAKE_CASE = tokenizer.tokenize("""I was born in 92000, and this is falsé.""") self.assertListEqual( lowerCAmelCase__ , [ SPIECE_UNDERLINE + """""", """i""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", 
"""9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """se""", """.""", ] , ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""▁he""", """ll""", """o"""]) def snake_case_ ( self): __SCREAMING_SNAKE_CASE = XLNetTokenizer(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__) __SCREAMING_SNAKE_CASE = tokenizer.tokenize("""I was born in 92000, and this is falsé.""") self.assertListEqual( lowerCAmelCase__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """se""", """.""", ] , ) @slow def snake_case_ ( self): __SCREAMING_SNAKE_CASE = XLNetTokenizer.from_pretrained("""xlnet-base-cased""") __SCREAMING_SNAKE_CASE = tokenizer.encode("""sequence builders""" , add_special_tokens=lowerCAmelCase__) __SCREAMING_SNAKE_CASE = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowerCAmelCase__) __SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def snake_case_ ( self): # fmt: off __SCREAMING_SNAKE_CASE = {"""input_ids""": [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase__ , model_name="""xlnet-base-cased""" , revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" , )
100
0
"""simple docstring""" import pytest from datasets.parallel import ParallelBackendConfig, parallel_backend from datasets.utils.py_utils import map_nested from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows def A_ ( snake_case_ : int ): # picklable for multiprocessing '''simple docstring''' return i + 1 @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows def A_ ( ): '''simple docstring''' with parallel_backend("""spark""" ): assert ParallelBackendConfig.backend_name == "spark" UpperCamelCase : Optional[Any] = [1, 2, 3] with pytest.raises(snake_case_ ): with parallel_backend("""unsupported backend""" ): map_nested(snake_case_ ,snake_case_ ,num_proc=2 ) with pytest.raises(snake_case_ ): with parallel_backend("""unsupported backend""" ): map_nested(snake_case_ ,snake_case_ ,num_proc=-1 ) @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows @pytest.mark.parametrize("""num_proc""" ,[2, -1] ) def A_ ( snake_case_ : List[str] ): '''simple docstring''' UpperCamelCase : List[Any] = [1, 2] UpperCamelCase : List[Any] = {"""a""": 1, """b""": 2} UpperCamelCase : List[str] = {"""a""": [1, 2], """b""": [3, 4]} UpperCamelCase : Tuple = {"""a""": {"""1""": 1}, """b""": 2} UpperCamelCase : Any = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4} UpperCamelCase : Optional[int] = [2, 3] UpperCamelCase : List[str] = {"""a""": 2, """b""": 3} UpperCamelCase : Any = {"""a""": [2, 3], """b""": [4, 5]} UpperCamelCase : Tuple = {"""a""": {"""1""": 2}, """b""": 3} UpperCamelCase : List[str] = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5} with parallel_backend("""spark""" ): assert map_nested(snake_case_ ,snake_case_ ,num_proc=snake_case_ ) == expected_map_nested_sa assert map_nested(snake_case_ ,snake_case_ ,num_proc=snake_case_ ) == expected_map_nested_sa assert map_nested(snake_case_ ,snake_case_ ,num_proc=snake_case_ ) == expected_map_nested_sa assert map_nested(snake_case_ ,snake_case_ ,num_proc=snake_case_ ) == expected_map_nested_sa assert map_nested(snake_case_ ,snake_case_ ,num_proc=snake_case_ ) == expected_map_nested_sa
356
"""simple docstring""" import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def A_ ( snake_case_ : Dataset ,snake_case_ : Dict[str, str] ): '''simple docstring''' UpperCamelCase : List[str] = args.log_outputs UpperCamelCase : Tuple = """_""".join(args.dataset.split("""/""" ) + [args.config, args.split] ) # load metric UpperCamelCase : List[Any] = load_metric("""wer""" ) UpperCamelCase : Any = load_metric("""cer""" ) # compute metrics UpperCamelCase : str = wer.compute(references=result["""target"""] ,predictions=result["""prediction"""] ) UpperCamelCase : Dict = cer.compute(references=result["""target"""] ,predictions=result["""prediction"""] ) # print & log results UpperCamelCase : Optional[int] = f'WER: {wer_result}\nCER: {cer_result}' print(snake_case_ ) with open(f'{dataset_id}_eval_results.txt' ,"""w""" ) as f: f.write(snake_case_ ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: UpperCamelCase : Optional[Any] = f'log_{dataset_id}_predictions.txt' UpperCamelCase : str = f'log_{dataset_id}_targets.txt' with open(snake_case_ ,"""w""" ) as p, open(snake_case_ ,"""w""" ) as t: # mapping function to write output def write_to_file(snake_case_ : Union[str, Any] ,snake_case_ : Tuple ): p.write(f'{i}' + """\n""" ) p.write(batch["""prediction"""] + """\n""" ) t.write(f'{i}' + """\n""" ) t.write(batch["""target"""] + """\n""" ) result.map(snake_case_ ,with_indices=snake_case_ ) def A_ ( snake_case_ : str ): '''simple docstring''' UpperCamelCase : Dict = """[,?.!\-\;\:\"“%‘”�—’…–]""" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training UpperCamelCase : str = re.sub(snake_case_ ,"""""" ,text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! 
UpperCamelCase : List[str] = ["""\n\n""", """\n""", """ """, """ """] for t in token_sequences_to_ignore: UpperCamelCase : Tuple = """ """.join(text.split(snake_case_ ) ) return text def A_ ( snake_case_ : str ): '''simple docstring''' # load dataset UpperCamelCase : Union[str, Any] = load_dataset(args.dataset ,args.config ,split=args.split ,use_auth_token=snake_case_ ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor UpperCamelCase : List[Any] = AutoFeatureExtractor.from_pretrained(args.model_id ) UpperCamelCase : Dict = feature_extractor.sampling_rate # resample audio UpperCamelCase : Optional[Any] = dataset.cast_column("""audio""" ,Audio(sampling_rate=snake_case_ ) ) # load eval pipeline if args.device is None: UpperCamelCase : int = 0 if torch.cuda.is_available() else -1 UpperCamelCase : Union[str, Any] = pipeline("""automatic-speech-recognition""" ,model=args.model_id ,device=args.device ) # map function to decode audio def map_to_pred(snake_case_ : Union[str, Any] ): UpperCamelCase : List[Any] = asr( batch["""audio"""]["""array"""] ,chunk_length_s=args.chunk_length_s ,stride_length_s=args.stride_length_s ) UpperCamelCase : Union[str, Any] = prediction["""text"""] UpperCamelCase : Optional[Any] = normalize_text(batch["""sentence"""] ) return batch # run inference on all examples UpperCamelCase : Any = dataset.map(snake_case_ ,remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(snake_case_ ,snake_case_ ) if __name__ == "__main__": __A : List[str] = argparse.ArgumentParser() parser.add_argument( '''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers''' ) parser.add_argument( '''--dataset''', type=str, required=True, help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''', ) parser.add_argument( '''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice''' ) parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''') parser.add_argument( '''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.''' ) parser.add_argument( '''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.''' ) parser.add_argument( '''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.''' ) parser.add_argument( '''--device''', type=int, default=None, help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''', ) __A : Optional[Any] = parser.parse_args() main(args)
27
0
def bfs(graph, s, t, parent):
    """Return True if there is an augmenting path from s to t in the residual graph."""
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def ford_fulkerson(graph, source, sink):
    # This array is filled by BFS to store the augmenting path
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        # Find the bottleneck capacity along the path found by BFS
        path_flow = float("Inf")
        s = sink
        while s != source:
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow

        # Update residual capacities along the path
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
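# The BFS-based path search above makes this the Edmonds-Karp variant of
# Ford-Fulkerson, which runs in O(V * E^2) for V vertices and E edges. An
# illustrative sanity check follows; note that ford_fulkerson consumes
# `graph` by rewriting it into residual capacities, so a fresh copy of the
# same network is built here (`fresh_graph` is introduced only for this
# check). The maximum s-t flow of the capacity matrix above is 23.
fresh_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
assert ford_fulkerson(fresh_graph, source, sink) == 23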
121
import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( '''files''' , [ ['''full:README.md''', '''dataset_infos.json'''], ['''empty:README.md''', '''dataset_infos.json'''], ['''dataset_infos.json'''], ['''full:README.md'''], ] , ) def lowerCamelCase__ ( a , a ) -> Any: _A: Any = tmp_path_factory.mktemp('''dset_infos_dir''' ) if "full:README.md" in files: with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f: f.write('''---\ndataset_info:\n dataset_size: 42\n---''' ) if "empty:README.md" in files: with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f: f.write('''''' ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / '''dataset_infos.json''' , '''w''' ) as f: f.write('''{"default": {"dataset_size": 42}}''' ) _A: Optional[int] = DatasetInfosDict.from_directory(a ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( '''dataset_info''' , [ DatasetInfo(), DatasetInfo( description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , ), ] , ) def lowerCamelCase__ ( a , a ) -> Any: _A: int = str(a ) dataset_info.write_to_directory(a ) _A: str = DatasetInfo.from_directory(a ) assert dataset_info == reloaded assert os.path.exists(os.path.join(a , '''dataset_info.json''' ) ) def lowerCamelCase__ ( ) -> Any: _A: int = DatasetInfo( description='''foo''' , citation='''bar''' , homepage='''https://foo.bar''' , license='''CC0''' , features=Features({'''a''': Value('''int32''' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train''', '''num_examples''': 42}] , download_checksums={} , download_size=13_37 , post_processing_size=4_42 , dataset_size=12_34 , size_in_bytes=13_37 + 4_42 + 12_34 , ) _A: Optional[Any] = dataset_info._to_yaml_dict() assert sorted(a ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) _A: str = yaml.safe_dump(a ) _A: Optional[int] = yaml.safe_load(a ) assert dataset_info_yaml_dict == reloaded def lowerCamelCase__ ( ) -> int: _A: Union[str, Any] = DatasetInfo() _A: Union[str, Any] = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( '''dataset_infos_dict''' , [ DatasetInfosDict(), DatasetInfosDict({'''default''': DatasetInfo()} ), DatasetInfosDict({'''my_config_name''': DatasetInfo()} ), DatasetInfosDict( { '''default''': DatasetInfo( description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , ) } ), DatasetInfosDict( { '''v1''': DatasetInfo(dataset_size=42 ), '''v2''': DatasetInfo(dataset_size=13_37 ), } ), ] , ) def lowerCamelCase__ ( a , a ) -> Optional[int]: _A: Optional[int] = str(a ) dataset_infos_dict.write_to_directory(a ) _A: Union[str, Any] = DatasetInfosDict.from_directory(a ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): _A: 
Optional[Any] = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml _A: Any = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(a , '''README.md''' ) )
121
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
229
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
229
1
import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _A = logging.get_logger(__name__) _A = { 'b0': efficientnet.EfficientNetBa, 'b1': efficientnet.EfficientNetBa, 'b2': efficientnet.EfficientNetBa, 'b3': efficientnet.EfficientNetBa, 'b4': efficientnet.EfficientNetBa, 'b5': efficientnet.EfficientNetBa, 'b6': efficientnet.EfficientNetBa, 'b7': efficientnet.EfficientNetBa, } _A = { 'b0': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.0, 'image_size': 224, 'dropout_rate': 0.2, 'dw_padding': [], }, 'b1': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.1, 'image_size': 240, 'dropout_rate': 0.2, 'dw_padding': [16], }, 'b2': { 'hidden_dim': 1408, 'width_coef': 1.1, 'depth_coef': 1.2, 'image_size': 260, 'dropout_rate': 0.3, 'dw_padding': [5, 8, 16], }, 'b3': { 'hidden_dim': 1536, 'width_coef': 1.2, 'depth_coef': 1.4, 'image_size': 300, 'dropout_rate': 0.3, 'dw_padding': [5, 18], }, 'b4': { 'hidden_dim': 1792, 'width_coef': 1.4, 'depth_coef': 1.8, 'image_size': 380, 'dropout_rate': 0.4, 'dw_padding': [6], }, 'b5': { 'hidden_dim': 2048, 'width_coef': 1.6, 'depth_coef': 2.2, 'image_size': 456, 'dropout_rate': 0.4, 'dw_padding': [13, 27], }, 'b6': { 'hidden_dim': 2304, 'width_coef': 1.8, 'depth_coef': 2.6, 'image_size': 528, 'dropout_rate': 0.5, 'dw_padding': [31], }, 'b7': { 'hidden_dim': 2560, 'width_coef': 2.0, 'depth_coef': 3.1, 'image_size': 600, 'dropout_rate': 0.5, 'dw_padding': [18], }, } def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] ): __UpperCamelCase =EfficientNetConfig() __UpperCamelCase =CONFIG_MAP[model_name]['hidden_dim'] __UpperCamelCase =CONFIG_MAP[model_name]['width_coef'] __UpperCamelCase =CONFIG_MAP[model_name]['depth_coef'] __UpperCamelCase =CONFIG_MAP[model_name]['image_size'] __UpperCamelCase =CONFIG_MAP[model_name]['dropout_rate'] __UpperCamelCase =CONFIG_MAP[model_name]['dw_padding'] __UpperCamelCase ='huggingface/label-files' __UpperCamelCase ='imagenet-1k-id2label.json' __UpperCamelCase =10_00 __UpperCamelCase =json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) ) __UpperCamelCase ={int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()} __UpperCamelCase =idalabel __UpperCamelCase ={v: k for k, v in idalabel.items()} return config def _UpperCAmelCase ( ): __UpperCamelCase ='http://images.cocodataset.org/val2017/000000039769.jpg' __UpperCamelCase =Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw ) return im def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ): __UpperCamelCase =CONFIG_MAP[model_name]['image_size'] __UpperCamelCase =EfficientNetImageProcessor( size={'height': size, 'width': size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=SCREAMING_SNAKE_CASE__ , ) return preprocessor def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str ): __UpperCamelCase =[v.split('_' )[0].split('block' )[1] for v in original_param_names if v.startswith('block' )] __UpperCamelCase =sorted(set(SCREAMING_SNAKE_CASE__ ) ) __UpperCamelCase =len(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase ={b: 
str(SCREAMING_SNAKE_CASE__ ) for b, i in zip(SCREAMING_SNAKE_CASE__ , range(SCREAMING_SNAKE_CASE__ ) )} __UpperCamelCase =[] rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') ) rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') ) rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') ) rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') ) rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') ) for b in block_names: __UpperCamelCase =block_name_mapping[b] rename_keys.append((F'block{b}_expand_conv/kernel:0', F'encoder.blocks.{hf_b}.expansion.expand_conv.weight') ) rename_keys.append((F'block{b}_expand_bn/gamma:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.weight') ) rename_keys.append((F'block{b}_expand_bn/beta:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.bias') ) rename_keys.append( (F'block{b}_expand_bn/moving_mean:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.running_mean') ) rename_keys.append( (F'block{b}_expand_bn/moving_variance:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.running_var') ) rename_keys.append( (F'block{b}_dwconv/depthwise_kernel:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight') ) rename_keys.append((F'block{b}_bn/gamma:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight') ) rename_keys.append((F'block{b}_bn/beta:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias') ) rename_keys.append( (F'block{b}_bn/moving_mean:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean') ) rename_keys.append( (F'block{b}_bn/moving_variance:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var') ) rename_keys.append((F'block{b}_se_reduce/kernel:0', F'encoder.blocks.{hf_b}.squeeze_excite.reduce.weight') ) rename_keys.append((F'block{b}_se_reduce/bias:0', F'encoder.blocks.{hf_b}.squeeze_excite.reduce.bias') ) rename_keys.append((F'block{b}_se_expand/kernel:0', F'encoder.blocks.{hf_b}.squeeze_excite.expand.weight') ) rename_keys.append((F'block{b}_se_expand/bias:0', F'encoder.blocks.{hf_b}.squeeze_excite.expand.bias') ) rename_keys.append( (F'block{b}_project_conv/kernel:0', F'encoder.blocks.{hf_b}.projection.project_conv.weight') ) rename_keys.append((F'block{b}_project_bn/gamma:0', F'encoder.blocks.{hf_b}.projection.project_bn.weight') ) rename_keys.append((F'block{b}_project_bn/beta:0', F'encoder.blocks.{hf_b}.projection.project_bn.bias') ) rename_keys.append( (F'block{b}_project_bn/moving_mean:0', F'encoder.blocks.{hf_b}.projection.project_bn.running_mean') ) rename_keys.append( (F'block{b}_project_bn/moving_variance:0', F'encoder.blocks.{hf_b}.projection.project_bn.running_var') ) rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') ) rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') ) rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') ) rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') ) rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') ) __UpperCamelCase ={} for item in rename_keys: if item[0] in original_param_names: __UpperCamelCase ='efficientnet.' 
+ item[1] __UpperCamelCase ='classifier.weight' __UpperCamelCase ='classifier.bias' return key_mapping def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple ): for key, value in tf_params.items(): if "normalization" in key: continue __UpperCamelCase =key_mapping[key] if "_conv" in key and "kernel" in key: __UpperCamelCase =torch.from_numpy(SCREAMING_SNAKE_CASE__ ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: __UpperCamelCase =torch.from_numpy(SCREAMING_SNAKE_CASE__ ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: __UpperCamelCase =torch.from_numpy(np.transpose(SCREAMING_SNAKE_CASE__ ) ) else: __UpperCamelCase =torch.from_numpy(SCREAMING_SNAKE_CASE__ ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(SCREAMING_SNAKE_CASE__ ) @torch.no_grad() def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ): __UpperCamelCase =model_classes[model_name]( include_top=SCREAMING_SNAKE_CASE__ , weights='imagenet' , input_tensor=SCREAMING_SNAKE_CASE__ , input_shape=SCREAMING_SNAKE_CASE__ , pooling=SCREAMING_SNAKE_CASE__ , classes=10_00 , classifier_activation='softmax' , ) __UpperCamelCase =original_model.trainable_variables __UpperCamelCase =original_model.non_trainable_variables __UpperCamelCase ={param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: __UpperCamelCase =param.numpy() __UpperCamelCase =list(tf_params.keys() ) # Load HuggingFace model __UpperCamelCase =get_efficientnet_config(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =EfficientNetForImageClassification(SCREAMING_SNAKE_CASE__ ).eval() __UpperCamelCase =hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print('Converting parameters...' ) __UpperCamelCase =rename_keys(SCREAMING_SNAKE_CASE__ ) replace_params(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Initialize preprocessor and preprocess input image __UpperCamelCase =convert_image_processor(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =preprocessor(images=prepare_img() , return_tensors='pt' ) # HF model inference hf_model.eval() with torch.no_grad(): __UpperCamelCase =hf_model(**SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =outputs.logits.detach().numpy() # Original model inference __UpperCamelCase =False __UpperCamelCase =CONFIG_MAP[model_name]['image_size'] __UpperCamelCase =prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) __UpperCamelCase =image.img_to_array(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =np.expand_dims(SCREAMING_SNAKE_CASE__ , axis=0 ) __UpperCamelCase =original_model.predict(SCREAMING_SNAKE_CASE__ ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 ), "The predicted logits are not the same." print('Model outputs match!' ) if save_model: # Create folder to save model if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): os.mkdir(SCREAMING_SNAKE_CASE__ ) # Save converted model and image processor hf_model.save_pretrained(SCREAMING_SNAKE_CASE__ ) preprocessor.save_pretrained(SCREAMING_SNAKE_CASE__ ) if push_to_hub: # Push model and image processor to hub print(F'Pushing converted {model_name} to the hub...' 
) __UpperCamelCase =F'efficientnet-{model_name}' preprocessor.push_to_hub(SCREAMING_SNAKE_CASE__ ) hf_model.push_to_hub(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": _A = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='b0', type=str, help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].', ) parser.add_argument( '--pytorch_dump_folder_path', default='hf_model', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--save_model', action='store_true', help='Save model to local') parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') _A = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
62
import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase__ ( A_ , unittest.TestCase ): """simple docstring""" UpperCAmelCase__ : Dict = GPTaTokenizer UpperCAmelCase__ : Any = GPTaTokenizerFast UpperCAmelCase__ : Tuple = True UpperCAmelCase__ : int = {"add_prefix_space": True} UpperCAmelCase__ : Any = False def _a ( self ) -> Optional[int]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __UpperCamelCase =[ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', '<|endoftext|>', ] __UpperCamelCase =dict(zip(A_ , range(len(A_ ) ) ) ) __UpperCamelCase =['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] __UpperCamelCase ={'unk_token': '<unk>'} __UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) __UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(A_ ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(A_ ) ) def _a ( self , **A_ ) -> str: kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname , **A_ ) def _a ( self , **A_ ) -> Optional[Any]: kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **A_ ) def _a ( self , A_ ) -> Tuple: __UpperCamelCase ='lower newer' __UpperCamelCase ='lower newer' return input_text, output_text def _a ( self ) -> List[Any]: __UpperCamelCase =GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) __UpperCamelCase ='lower newer' __UpperCamelCase =['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er'] __UpperCamelCase =tokenizer.tokenize(A_ , add_prefix_space=A_ ) self.assertListEqual(A_ , A_ ) __UpperCamelCase =tokens + [tokenizer.unk_token] __UpperCamelCase =[14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , A_ ) def _a ( self ) -> int: if not self.test_rust_tokenizer: return __UpperCamelCase =self.get_tokenizer() __UpperCamelCase =self.get_rust_tokenizer(add_prefix_space=A_ ) __UpperCamelCase ='lower newer' # Testing tokenization __UpperCamelCase =tokenizer.tokenize(A_ , add_prefix_space=A_ ) __UpperCamelCase =rust_tokenizer.tokenize(A_ ) self.assertListEqual(A_ , A_ ) # Testing conversion to ids without special tokens __UpperCamelCase =tokenizer.encode(A_ , add_special_tokens=A_ , add_prefix_space=A_ ) __UpperCamelCase =rust_tokenizer.encode(A_ , add_special_tokens=A_ ) self.assertListEqual(A_ , A_ ) # Testing conversion to ids with special tokens __UpperCamelCase =self.get_rust_tokenizer(add_prefix_space=A_ ) __UpperCamelCase =tokenizer.encode(A_ , add_prefix_space=A_ ) __UpperCamelCase =rust_tokenizer.encode(A_ ) self.assertListEqual(A_ , A_ ) # Testing the unknown token __UpperCamelCase =tokens + [rust_tokenizer.unk_token] __UpperCamelCase =[14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(A_ ) , A_ ) def _a ( self , *A_ , **A_ ) -> Optional[int]: # It's very difficult to mix/test pretokenization with byte-level # And get both GPT2 and Roberta to 
work at the same time (mostly an issue of adding a space before the string) pass def _a ( self , A_=15 ) -> List[str]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ): __UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ ) # Simple input __UpperCamelCase ='This is a simple input' __UpperCamelCase =['This is a simple input 1', 'This is a simple input 2'] __UpperCamelCase =('This is a simple input', 'This is a pair') __UpperCamelCase =[ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests self.assertRaises(A_ , tokenizer_r.encode , A_ , max_length=A_ , padding='max_length' ) # Simple input self.assertRaises(A_ , tokenizer_r.encode_plus , A_ , max_length=A_ , padding='max_length' ) # Simple input self.assertRaises( A_ , tokenizer_r.batch_encode_plus , A_ , max_length=A_ , padding='max_length' , ) # Pair input self.assertRaises(A_ , tokenizer_r.encode , A_ , max_length=A_ , padding='max_length' ) # Pair input self.assertRaises(A_ , tokenizer_r.encode_plus , A_ , max_length=A_ , padding='max_length' ) # Pair input self.assertRaises( A_ , tokenizer_r.batch_encode_plus , A_ , max_length=A_ , padding='max_length' , ) def _a ( self ) -> int: __UpperCamelCase =GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' ) # Simple input __UpperCamelCase ='This is a simple input' __UpperCamelCase =['This is a simple input looooooooong', 'This is a simple input'] __UpperCamelCase =('This is a simple input', 'This is a pair') __UpperCamelCase =[ ('This is a simple input loooooong', 'This is a simple input'), ('This is a simple pair loooooong', 'This is a simple pair'), ] __UpperCamelCase =tokenizer.pad_token_id __UpperCamelCase =tokenizer(A_ , padding='max_length' , max_length=30 , return_tensors='np' ) __UpperCamelCase =tokenizer(A_ , padding=A_ , truncate=A_ , return_tensors='np' ) __UpperCamelCase =tokenizer(*A_ , padding='max_length' , max_length=60 , return_tensors='np' ) __UpperCamelCase =tokenizer(A_ , padding=A_ , truncate=A_ , return_tensors='np' ) # s # test single string max_length padding self.assertEqual(out_s['input_ids'].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s['input_ids'] ) self.assertTrue(0 in out_s['attention_mask'] ) # s2 # test automatic padding self.assertEqual(out_sa['input_ids'].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['input_ids'][0] ) self.assertFalse(0 in out_sa['attention_mask'][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa['input_ids'][1] ) self.assertTrue(0 in out_sa['attention_mask'][1] ) # p # test single pair max_length padding self.assertEqual(out_p['input_ids'].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p['input_ids'] ) self.assertTrue(0 in out_p['attention_mask'] ) # p2 # test automatic padding pair self.assertEqual(out_pa['input_ids'].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['input_ids'][0] ) self.assertFalse(0 in out_pa['attention_mask'][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['input_ids'][1] ) self.assertTrue(0 in out_pa['attention_mask'][1] ) def _a ( self ) -> Union[str, Any]: __UpperCamelCase ='$$$' __UpperCamelCase =GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=A_ , add_bos_token=A_ ) __UpperCamelCase ='This is a simple input' __UpperCamelCase =['This is a 
simple input 1', 'This is a simple input 2'] __UpperCamelCase =tokenizer.bos_token_id __UpperCamelCase =tokenizer(A_ ) __UpperCamelCase =tokenizer(A_ ) self.assertEqual(out_s.input_ids[0] , A_ ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) __UpperCamelCase =tokenizer.decode(out_s.input_ids ) __UpperCamelCase =tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , A_ ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def _a ( self ) -> Optional[int]: pass def _a ( self ) -> Any: # TODO: change to self.get_tokenizers() when the fast version is implemented __UpperCamelCase =[self.get_tokenizer(do_lower_case=A_ , add_bos_token=A_ )] for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}' ): __UpperCamelCase ='Encode this.' __UpperCamelCase ='This one too please.' __UpperCamelCase =tokenizer.encode(A_ , add_special_tokens=A_ ) encoded_sequence += tokenizer.encode(A_ , add_special_tokens=A_ ) __UpperCamelCase =tokenizer.encode_plus( A_ , A_ , add_special_tokens=A_ , return_special_tokens_mask=A_ , ) __UpperCamelCase =encoded_sequence_dict['input_ids'] __UpperCamelCase =encoded_sequence_dict['special_tokens_mask'] self.assertEqual(len(A_ ) , len(A_ ) ) __UpperCamelCase =[ (x if not special_tokens_mask[i] else None) for i, x in enumerate(A_ ) ] __UpperCamelCase =[x for x in filtered_sequence if x is not None] self.assertEqual(A_ , A_ ) @require_tokenizers class UpperCAmelCase__ ( unittest.TestCase ): """simple docstring""" def _a ( self ) -> Optional[Any]: # More context: # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1 # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519 # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439 __UpperCamelCase =AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=A_ ) __UpperCamelCase ='A photo of a cat' __UpperCamelCase =tokenizer.encode( A_ , ) self.assertEqual(A_ , [2, 250, 1345, 9, 10, 4758] ) tokenizer.save_pretrained('test_opt' ) __UpperCamelCase =AutoTokenizer.from_pretrained('./test_opt' ) __UpperCamelCase =tokenizer.encode( A_ , ) self.assertEqual(A_ , [2, 250, 1345, 9, 10, 4758] ) def _a ( self ) -> Dict: __UpperCamelCase =AutoTokenizer.from_pretrained('facebook/opt-350m' , use_slow=A_ ) __UpperCamelCase ='A photo of a cat' __UpperCamelCase =tokenizer.encode( A_ , ) # Same as above self.assertEqual(A_ , [2, 250, 1345, 9, 10, 4758] ) @unittest.skip('This test is failing because of a bug in the fast tokenizer' ) def _a ( self ) -> List[Any]: __UpperCamelCase =AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=A_ ) __UpperCamelCase ='bos' __UpperCamelCase =tokenizer.get_vocab()['bos'] __UpperCamelCase ='A photo of a cat' __UpperCamelCase =tokenizer.encode( A_ , ) # We changed the bos token self.assertEqual(A_ , [31957, 250, 1345, 9, 10, 4758] ) tokenizer.save_pretrained('./tok' ) __UpperCamelCase =AutoTokenizer.from_pretrained('./tok' ) self.assertTrue(tokenizer.is_fast ) __UpperCamelCase =tokenizer.encode( A_ , ) self.assertEqual(A_ , [31957, 250, 1345, 9, 10, 4758] )
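# A minimal sketch of the padding behaviour the tests above exercise. GPT-2
# ships without a pad token, so one is supplied explicitly; the checkpoint
# name below is illustrative, not taken from the tests.
from transformers import GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<pad>")
batch = tokenizer(
    ["This is a simple input looooooooong", "This is a simple input"],
    padding=True,  # pad the short sequence up to the long one
    return_tensors="np",
)
assert tokenizer.pad_token_id in batch["input_ids"][1]  # short slice is padded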
62
1
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig from transformers.utils import logging logging.set_verbosity_info() a_ = logging.get_logger(__name__) def _a( UpperCamelCase__ : Optional[Any] ): '''simple docstring''' if "resnet-50" in model_name: SCREAMING_SNAKE_CASE__ : List[str] =ResNetConfig.from_pretrained('''microsoft/resnet-50''' ) elif "resnet-101" in model_name: SCREAMING_SNAKE_CASE__ : Dict =ResNetConfig.from_pretrained('''microsoft/resnet-101''' ) else: raise ValueError('''Model name should include either resnet50 or resnet101''' ) SCREAMING_SNAKE_CASE__ : List[str] =DetrConfig(use_timm_backbone=UpperCamelCase__, backbone_config=UpperCamelCase__ ) # set label attributes SCREAMING_SNAKE_CASE__ : Tuple ='''panoptic''' in model_name if is_panoptic: SCREAMING_SNAKE_CASE__ : List[str] =2_5_0 else: SCREAMING_SNAKE_CASE__ : Optional[Any] =9_1 SCREAMING_SNAKE_CASE__ : Any ='''huggingface/label-files''' SCREAMING_SNAKE_CASE__ : str ='''coco-detection-id2label.json''' SCREAMING_SNAKE_CASE__ : int =json.load(open(hf_hub_download(UpperCamelCase__, UpperCamelCase__, repo_type='''dataset''' ), '''r''' ) ) SCREAMING_SNAKE_CASE__ : int ={int(UpperCamelCase__ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE__ : Optional[int] =idalabel SCREAMING_SNAKE_CASE__ : List[Any] ={v: k for k, v in idalabel.items()} return config, is_panoptic def _a( UpperCamelCase__ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int =[] # stem # fmt: off rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') ) rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') ) rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') ) rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') ) rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') ) # stages for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): # shortcut if layer_idx == 0: rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var", 
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var", ) ) # 3 convs for i in range(3 ): rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var", ) ) # fmt: on for i in range(config.encoder_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( ( f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight", ) ) rename_keys.append( (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias") ) rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight") ) rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias") ) rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight") ) rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias") ) rename_keys.append( (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight") ) rename_keys.append( (f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias") ) rename_keys.append( (f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight") ) rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias") ) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( ( f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight", ) ) rename_keys.append( (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias") ) rename_keys.append( ( f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight", f"decoder.layers.{i}.encoder_attn.out_proj.weight", ) ) rename_keys.append( ( f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias", f"decoder.layers.{i}.encoder_attn.out_proj.bias", ) ) rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight") ) rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias") ) rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight") ) rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias") ) rename_keys.append( 
(f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias") ) rename_keys.append( (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias") ) rename_keys.append( (f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight") ) rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias") ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ('''input_proj.weight''', '''input_projection.weight'''), ('''input_proj.bias''', '''input_projection.bias'''), ('''query_embed.weight''', '''query_position_embeddings.weight'''), ('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''), ('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''), ('''class_embed.weight''', '''class_labels_classifier.weight'''), ('''class_embed.bias''', '''class_labels_classifier.bias'''), ('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''), ('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''), ('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''), ('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''), ('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''), ('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''), ] ) return rename_keys def _a( UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Optional[int], UpperCamelCase__ : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Tuple =state_dict.pop(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Optional[int] =val def _a( UpperCamelCase__ : Optional[int], UpperCamelCase__ : Tuple=False ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] ='''''' if is_panoptic: SCREAMING_SNAKE_CASE__ : str ='''detr.''' # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) SCREAMING_SNAKE_CASE__ : List[Any] =state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" ) SCREAMING_SNAKE_CASE__ : Optional[Any] =state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE__ : Optional[int] =in_proj_weight[:2_5_6, :] SCREAMING_SNAKE_CASE__ : Union[str, Any] =in_proj_bias[:2_5_6] SCREAMING_SNAKE_CASE__ : Optional[int] =in_proj_weight[2_5_6:5_1_2, :] SCREAMING_SNAKE_CASE__ : List[Any] =in_proj_bias[2_5_6:5_1_2] SCREAMING_SNAKE_CASE__ : Dict =in_proj_weight[-2_5_6:, :] SCREAMING_SNAKE_CASE__ : Optional[Any] =in_proj_bias[-2_5_6:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention SCREAMING_SNAKE_CASE__ : Optional[Any] =state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight" ) SCREAMING_SNAKE_CASE__ : int =state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias" ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE__ : 
Optional[Any] =in_proj_weight[:2_5_6, :] SCREAMING_SNAKE_CASE__ : Any =in_proj_bias[:2_5_6] SCREAMING_SNAKE_CASE__ : Tuple =in_proj_weight[2_5_6:5_1_2, :] SCREAMING_SNAKE_CASE__ : str =in_proj_bias[2_5_6:5_1_2] SCREAMING_SNAKE_CASE__ : Optional[Any] =in_proj_weight[-2_5_6:, :] SCREAMING_SNAKE_CASE__ : Any =in_proj_bias[-2_5_6:] # read in weights + bias of input projection layer of cross-attention SCREAMING_SNAKE_CASE__ : Union[str, Any] =state_dict.pop( f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight" ) SCREAMING_SNAKE_CASE__ : int =state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias" ) # next, add query, keys and values (in that order) of cross-attention to the state dict SCREAMING_SNAKE_CASE__ : Any =in_proj_weight_cross_attn[:2_5_6, :] SCREAMING_SNAKE_CASE__ : Any =in_proj_bias_cross_attn[:2_5_6] SCREAMING_SNAKE_CASE__ : Tuple =in_proj_weight_cross_attn[2_5_6:5_1_2, :] SCREAMING_SNAKE_CASE__ : int =in_proj_bias_cross_attn[2_5_6:5_1_2] SCREAMING_SNAKE_CASE__ : List[Any] =in_proj_weight_cross_attn[-2_5_6:, :] SCREAMING_SNAKE_CASE__ : Any =in_proj_bias_cross_attn[-2_5_6:] def _a( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict ='''http://images.cocodataset.org/val2017/000000039769.jpg''' SCREAMING_SNAKE_CASE__ : List[str] =Image.open(requests.get(UpperCamelCase__, stream=UpperCamelCase__ ).raw ) return im @torch.no_grad() def _a( UpperCamelCase__ : Optional[Any], UpperCamelCase__ : List[str]=None, UpperCamelCase__ : str=False ): '''simple docstring''' SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict =get_detr_config(UpperCamelCase__ ) # load original model from torch hub SCREAMING_SNAKE_CASE__ : List[Any] ={ '''detr-resnet-50''': '''detr_resnet50''', '''detr-resnet-101''': '''detr_resnet101''', } logger.info(f"Converting model {model_name}..." 
) SCREAMING_SNAKE_CASE__ : int =torch.hub.load('''facebookresearch/detr''', model_name_to_original_name[model_name], pretrained=UpperCamelCase__ ).eval() SCREAMING_SNAKE_CASE__ : str =detr.state_dict() # rename keys for src, dest in create_rename_keys(UpperCamelCase__ ): if is_panoptic: SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''detr.''' + src rename_key(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ) # query, key and value matrices need special treatment read_in_q_k_v(UpperCamelCase__, is_panoptic=UpperCamelCase__ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them SCREAMING_SNAKE_CASE__ : str ='''detr.model.''' if is_panoptic else '''model.''' for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith('''detr''' ) and not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ) ): SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict.pop(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Tuple =val elif "class_labels_classifier" in key or "bbox_predictor" in key: SCREAMING_SNAKE_CASE__ : Dict =state_dict.pop(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : List[Any] =val elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ): continue else: SCREAMING_SNAKE_CASE__ : Any =state_dict.pop(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Optional[int] =val else: if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ): SCREAMING_SNAKE_CASE__ : Union[str, Any] =state_dict.pop(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : List[Any] =val # finally, create HuggingFace model and load state dict SCREAMING_SNAKE_CASE__ : Optional[int] =DetrForSegmentation(UpperCamelCase__ ) if is_panoptic else DetrForObjectDetection(UpperCamelCase__ ) model.load_state_dict(UpperCamelCase__ ) model.eval() # verify our conversion on an image SCREAMING_SNAKE_CASE__ : int ='''coco_panoptic''' if is_panoptic else '''coco_detection''' SCREAMING_SNAKE_CASE__ : Optional[Any] =DetrImageProcessor(format=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =processor(images=prepare_img(), return_tensors='''pt''' ) SCREAMING_SNAKE_CASE__ : Dict =encoding['''pixel_values'''] SCREAMING_SNAKE_CASE__ : str =detr(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : List[str] =model(UpperCamelCase__ ) assert torch.allclose(outputs.logits, original_outputs['''pred_logits'''], atol=1e-3 ) assert torch.allclose(outputs.pred_boxes, original_outputs['''pred_boxes'''], atol=1e-3 ) if is_panoptic: assert torch.allclose(outputs.pred_masks, original_outputs['''pred_masks'''], atol=1e-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." 
) Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) model.save_pretrained(UpperCamelCase__ ) processor.save_pretrained(UpperCamelCase__ ) if push_to_hub: # Upload model and image processor to the hub logger.info('''Uploading PyTorch model and image processor to the hub...''' ) model.push_to_hub(f"nielsr/{model_name}" ) processor.push_to_hub(f"nielsr/{model_name}" ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument( '--model_name', default='detr-resnet-50', type=str, choices=['detr-resnet-50', 'detr-resnet-101'], help='Name of the DETR model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.') a_ = parser.parse_args() convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
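# The key-renaming pattern the conversion script above relies on, in
# isolation: pop the tensor under the old key and reinsert it under the new
# one. A typical invocation of the script is also sketched (paths are
# hypothetical):
#
#   python convert_detr_checkpoint.py --model_name detr-resnet-50 \
#       --pytorch_dump_folder_path ./detr-resnet-50
def rename_key_sketch(state_dict: dict, old: str, new: str) -> None:
    state_dict[new] = state_dict.pop(old)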
222
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_megatron_bert"] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports only happen on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
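# What the lazy module above buys in practice: importing the config is cheap
# and does not pull in torch; model classes are only materialized on first
# attribute access (a sketch).
from transformers.models.megatron_bert import MegatronBertConfig

config = MegatronBertConfig()  # BERT-style defaults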
222
1
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape

    is_diagonally_dominant = True
    for i in range(rows):
        total = 0
        for j in range(cols - 1):
            if i == j:
                continue
            total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
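# Worked example for jacobi_iteration_method above: a strictly diagonally
# dominant 3x3 system; the iterates converge toward the exact solution.
import numpy as np

coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
constant = np.array([[2.0], [-6.0], [-4.0]])
print(jacobi_iteration_method(coefficient, constant, [0.5, -0.5, -0.5], iterations=3))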
107
from typing import Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, padding=None, truncation=None, top_k=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Assume the caller passed a dict (or list of dicts) with "image" and "question" keys.
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
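# Typical use of the pipeline above; the default checkpoint and image URL are
# illustrative, and the exact scores depend on the model.
from transformers import pipeline

vqa = pipeline("visual-question-answering")
vqa(
    image="http://images.cocodataset.org/val2017/000000039769.jpg",
    question="How many cats are there?",
    top_k=2,
)  # -> a list of {"score": ..., "answer": ...} dicts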
29
0
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number (positive integers whose only prime factors are 2, 3 and 5)."""
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f"{ugly_numbers(200) = }")
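# Quick sanity check for ugly_numbers above: the sequence starts
# 1, 2, 3, 4, 5, 6, 8, 9, 10, 12 (only prime factors 2, 3 and 5).
assert [ugly_numbers(i) for i in range(1, 11)] == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]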
124
import os import pickle import unittest from transformers import AutoTokenizer from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.models.bert_japanese.tokenization_bert_japanese import ( VOCAB_FILES_NAMES, BertJapaneseTokenizer, CharacterTokenizer, JumanppTokenizer, MecabTokenizer, SudachiTokenizer, WordpieceTokenizer, ) from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi from ...test_tokenization_common import TokenizerTesterMixin @custom_tokenizers class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' snake_case_ = BertJapaneseTokenizer snake_case_ = False snake_case_ = True def UpperCamelCase_ ( self : List[Any] ): super().setUp() __A = [ "[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは", "世界", "##世界", "、", "##、", "。", "##。", ] __A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) def UpperCamelCase_ ( self : Union[str, Any] ,A : Optional[Any] ): __A = "こんにちは、世界。 \nこんばんは、世界。" __A = "こんにちは 、 世界 。 こんばんは 、 世界 。" return input_text, output_text def UpperCamelCase_ ( self : Any ,A : Optional[int] ): __A , __A = self.get_input_output_texts(A ) __A = tokenizer.encode(A ,add_special_tokens=A ) __A = tokenizer.decode(A ,clean_up_tokenization_spaces=A ) return text, ids def UpperCamelCase_ ( self : int ): pass # TODO add if relevant def UpperCamelCase_ ( self : int ): pass # TODO add if relevant def UpperCamelCase_ ( self : Optional[int] ): pass # TODO add if relevant def UpperCamelCase_ ( self : List[Any] ): __A = self.tokenizer_class(self.vocab_file ) __A = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。" ) self.assertListEqual(A ,["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) ,[3, 12, 10, 14, 4, 9, 12, 10, 14] ) def UpperCamelCase_ ( self : int ): __A = self.tokenizer_class(self.vocab_file ,word_tokenizer_type="mecab" ) self.assertIsNotNone(A ) __A = "こんにちは、世界。\nこんばんは、世界。" __A = tokenizer.tokenize(A ) self.assertListEqual(A ,["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) ,[3, 12, 10, 14, 4, 9, 12, 10, 14] ) __A = os.path.join(self.tmpdirname ,"tokenizer.bin" ) with open(A ,"wb" ) as handle: pickle.dump(A ,A ) with open(A ,"rb" ) as handle: __A = pickle.load(A ) __A = tokenizer_new.tokenize(A ) self.assertListEqual(A ,A ) def UpperCamelCase_ ( self : Any ): __A = MecabTokenizer(mecab_dic="ipadic" ) self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) ,["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] ,) def UpperCamelCase_ ( self : List[str] ): try: __A = MecabTokenizer(mecab_dic="unidic_lite" ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) ,["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] ,) def UpperCamelCase_ ( self : Tuple ): try: __A = MecabTokenizer(mecab_dic="unidic" ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) ,["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] ,) def UpperCamelCase_ ( self : Tuple ): __A = MecabTokenizer(do_lower_case=A ,mecab_dic="ipadic" ) self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) ,["アップルストア", "で", "iphone", "8", 
"が", "発売", "さ", "れ", "た", "。"] ,) def UpperCamelCase_ ( self : Any ): try: __A = MecabTokenizer( do_lower_case=A ,normalize_text=A ,mecab_option="-d /usr/local/lib/mecab/dic/jumandic" ) except RuntimeError: # if dict doesn't exist in the system, previous code raises this error. return self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) ,["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"] ,) def UpperCamelCase_ ( self : int ): __A = MecabTokenizer(normalize_text=A ,mecab_dic="ipadic" ) self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) ,["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"] ,) @require_sudachi def UpperCamelCase_ ( self : int ): __A = self.tokenizer_class(self.vocab_file ,word_tokenizer_type="sudachi" ) self.assertIsNotNone(A ) __A = "こんにちは、世界。\nこんばんは、世界。" __A = tokenizer.tokenize(A ) self.assertListEqual(A ,["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) ,[3, 12, 10, 14, 4, 9, 12, 10, 14] ) __A = os.path.join(self.tmpdirname ,"tokenizer.bin" ) with open(A ,"wb" ) as handle: pickle.dump(A ,A ) with open(A ,"rb" ) as handle: __A = pickle.load(A ) __A = tokenizer_new.tokenize(A ) self.assertListEqual(A ,A ) @require_sudachi def UpperCamelCase_ ( self : Optional[int] ): __A = SudachiTokenizer(sudachi_dict_type="core" ) self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) ,[" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] ,) @require_sudachi def UpperCamelCase_ ( self : List[Any] ): __A = SudachiTokenizer(sudachi_dict_type="core" ,sudachi_split_mode="A" ) self.assertListEqual(tokenizer.tokenize("外国人参政権" ) ,["外国", "人", "参政", "権"] ) @require_sudachi def UpperCamelCase_ ( self : int ): __A = SudachiTokenizer(sudachi_dict_type="core" ,sudachi_split_mode="B" ) self.assertListEqual(tokenizer.tokenize("外国人参政権" ) ,["外国人", "参政権"] ) @require_sudachi def UpperCamelCase_ ( self : Tuple ): __A = SudachiTokenizer(sudachi_dict_type="core" ,sudachi_split_mode="C" ) self.assertListEqual(tokenizer.tokenize("外国人参政権" ) ,["外国人参政権"] ) @require_sudachi def UpperCamelCase_ ( self : int ): __A = SudachiTokenizer(do_lower_case=A ,sudachi_dict_type="core" ) self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) ,[" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] ,) @require_sudachi def UpperCamelCase_ ( self : List[str] ): __A = SudachiTokenizer(normalize_text=A ,sudachi_dict_type="core" ) self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) ,[" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "] ,) @require_sudachi def UpperCamelCase_ ( self : str ): __A = SudachiTokenizer(trim_whitespace=A ,sudachi_dict_type="core" ) self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) ,["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] ,) @require_jumanpp def UpperCamelCase_ ( self : Optional[Any] ): __A = self.tokenizer_class(self.vocab_file ,word_tokenizer_type="jumanpp" ) self.assertIsNotNone(A ) __A = "こんにちは、世界。\nこんばんは、世界。" __A = tokenizer.tokenize(A ) self.assertListEqual(A ,["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) ,[3, 12, 10, 14, 4, 9, 12, 10, 14] ) __A = os.path.join(self.tmpdirname 
,"tokenizer.bin" ) with open(A ,"wb" ) as handle: pickle.dump(A ,A ) with open(A ,"rb" ) as handle: __A = pickle.load(A ) __A = tokenizer_new.tokenize(A ) self.assertListEqual(A ,A ) @require_jumanpp def UpperCamelCase_ ( self : int ): __A = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) ,["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] ,) @require_jumanpp def UpperCamelCase_ ( self : str ): __A = JumanppTokenizer(do_lower_case=A ) self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) ,["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] ,) @require_jumanpp def UpperCamelCase_ ( self : Any ): __A = JumanppTokenizer(normalize_text=A ) self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) ,["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] ,) @require_jumanpp def UpperCamelCase_ ( self : List[str] ): __A = JumanppTokenizer(trim_whitespace=A ) self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) ,["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"] ,) @require_jumanpp def UpperCamelCase_ ( self : Dict ): __A = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。" ) ,["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"] ,) def UpperCamelCase_ ( self : str ): __A = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"] __A = {} for i, token in enumerate(A ): __A = i __A = WordpieceTokenizer(vocab=A ,unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) ,[] ) self.assertListEqual(tokenizer.tokenize("こんにちは" ) ,["こんにちは"] ) self.assertListEqual(tokenizer.tokenize("こんばんは" ) ,["こん", "##ばんは"] ) self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは" ) ,["こん", "##ばんは", "[UNK]", "こんにちは"] ) def UpperCamelCase_ ( self : Any ): __A = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp" ) __A = tokenizer.subword_tokenizer __A = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。" ) self.assertListEqual(A ,["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"] ) __A = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは" ) self.assertListEqual(A ,["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"] ) def UpperCamelCase_ ( self : int ): __A = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese" ) __A = tokenizer.encode("ありがとう。" ,add_special_tokens=A ) __A = tokenizer.encode("どういたしまして。" ,add_special_tokens=A ) __A = tokenizer.build_inputs_with_special_tokens(A ) __A = tokenizer.build_inputs_with_special_tokens(A ,A ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' snake_case_ = BertJapaneseTokenizer snake_case_ = False def UpperCamelCase_ ( self : Any ): super().setUp() __A = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"] __A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) def UpperCamelCase_ ( self : int ,**A : str ): return 
BertJapaneseTokenizer.from_pretrained(self.tmpdirname ,subword_tokenizer_type="character" ,**A ) def UpperCamelCase_ ( self : List[str] ,A : str ): __A = "こんにちは、世界。 \nこんばんは、世界。" __A = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。" return input_text, output_text def UpperCamelCase_ ( self : str ): pass # TODO add if relevant def UpperCamelCase_ ( self : Optional[Any] ): pass # TODO add if relevant def UpperCamelCase_ ( self : Any ): pass # TODO add if relevant def UpperCamelCase_ ( self : str ): __A = self.tokenizer_class(self.vocab_file ,subword_tokenizer_type="character" ) __A = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。" ) self.assertListEqual( A ,["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(A ) ,[3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] ) def UpperCamelCase_ ( self : Union[str, Any] ): __A = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"] __A = {} for i, token in enumerate(A ): __A = i __A = CharacterTokenizer(vocab=A ,unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) ,[] ) self.assertListEqual(tokenizer.tokenize("こんにちは" ) ,["こ", "ん", "に", "ち", "は"] ) self.assertListEqual(tokenizer.tokenize("こんにちほ" ) ,["こ", "ん", "に", "ち", "[UNK]"] ) def UpperCamelCase_ ( self : Tuple ): __A = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char" ) __A = tokenizer.encode("ありがとう。" ,add_special_tokens=A ) __A = tokenizer.encode("どういたしまして。" ,add_special_tokens=A ) __A = tokenizer.build_inputs_with_special_tokens(A ) __A = tokenizer.build_inputs_with_special_tokens(A ,A ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self : int ): __A = "cl-tohoku/bert-base-japanese" __A = AutoTokenizer.from_pretrained(A ) self.assertIsInstance(A ,A ) class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self : Tuple ): __A = "cl-tohoku/bert-base-japanese" with self.assertLogs("transformers" ,level="WARNING" ) as cm: BertTokenizer.from_pretrained(A ) self.assertTrue( cm.records[0].message.startswith( "The tokenizer class you load from this checkpoint is not the same type as the class this function" " is called from." ) ) __A = "bert-base-cased" with self.assertLogs("transformers" ,level="WARNING" ) as cm: BertJapaneseTokenizer.from_pretrained(A ) self.assertTrue( cm.records[0].message.startswith( "The tokenizer class you load from this checkpoint is not the same type as the class this function" " is called from." ) )
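# Minimal use of the tokenizer class under test; the checkpoint name comes
# from the tests above, and the exact split depends on the installed MeCab
# dictionary.
from transformers import BertJapaneseTokenizer

tokenizer = BertJapaneseTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
print(tokenizer.tokenize("こんにちは、世界。"))  # word-level split, then WordPiece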
124
1
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { '''microsoft/table-transformer-detection''': ( '''https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json''' ), } class lowerCAmelCase_ ( snake_case_ ): UpperCAmelCase__ : int = "table-transformer" UpperCAmelCase__ : int = ["past_key_values"] UpperCAmelCase__ : Optional[Any] = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=100, SCREAMING_SNAKE_CASE_=6, SCREAMING_SNAKE_CASE_=2048, SCREAMING_SNAKE_CASE_=8, SCREAMING_SNAKE_CASE_=6, SCREAMING_SNAKE_CASE_=2048, SCREAMING_SNAKE_CASE_=8, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_="relu", SCREAMING_SNAKE_CASE_=256, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=1.0, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_="sine", SCREAMING_SNAKE_CASE_="resnet50", SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=5, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=5, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=0.1, **SCREAMING_SNAKE_CASE_, ) -> int: if backbone_config is not None and use_timm_backbone: raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' ) if not use_timm_backbone: if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' 
) UpperCamelCase : Optional[Any] = CONFIG_MAPPING['resnet'](out_features=['stage4'] ) elif isinstance(__UpperCAmelCase, __UpperCAmelCase ): UpperCamelCase : Optional[Any] = backbone_config.get('model_type' ) UpperCamelCase : Union[str, Any] = CONFIG_MAPPING[backbone_model_type] UpperCamelCase : Tuple = config_class.from_dict(__UpperCAmelCase ) # set timm attributes to None UpperCamelCase , UpperCamelCase , UpperCamelCase : int = None, None, None UpperCamelCase : int = use_timm_backbone UpperCamelCase : str = backbone_config UpperCamelCase : str = num_channels UpperCamelCase : Any = num_queries UpperCamelCase : List[str] = d_model UpperCamelCase : List[Any] = encoder_ffn_dim UpperCamelCase : Tuple = encoder_layers UpperCamelCase : List[str] = encoder_attention_heads UpperCamelCase : Tuple = decoder_ffn_dim UpperCamelCase : str = decoder_layers UpperCamelCase : Tuple = decoder_attention_heads UpperCamelCase : Any = dropout UpperCamelCase : Dict = attention_dropout UpperCamelCase : Union[str, Any] = activation_dropout UpperCamelCase : Tuple = activation_function UpperCamelCase : List[str] = init_std UpperCamelCase : Dict = init_xavier_std UpperCamelCase : int = encoder_layerdrop UpperCamelCase : List[Any] = decoder_layerdrop UpperCamelCase : List[str] = encoder_layers UpperCamelCase : List[Any] = auxiliary_loss UpperCamelCase : Dict = position_embedding_type UpperCamelCase : int = backbone UpperCamelCase : Optional[Any] = use_pretrained_backbone UpperCamelCase : Any = dilation # Hungarian matcher UpperCamelCase : Dict = class_cost UpperCamelCase : Any = bbox_cost UpperCamelCase : Optional[int] = giou_cost # Loss coefficients UpperCamelCase : List[str] = mask_loss_coefficient UpperCamelCase : List[Any] = dice_loss_coefficient UpperCamelCase : Tuple = bbox_loss_coefficient UpperCamelCase : Tuple = giou_loss_coefficient UpperCamelCase : Tuple = eos_coefficient super().__init__(is_encoder_decoder=__UpperCAmelCase, **__UpperCAmelCase ) @property def snake_case_ ( self ) -> Union[str, Any]: return self.encoder_attention_heads @property def snake_case_ ( self ) -> List[str]: return self.d_model class lowerCAmelCase_ ( snake_case_ ): UpperCAmelCase__ : int = version.parse("1.11" ) @property def snake_case_ ( self ) -> str: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ('pixel_mask', {0: 'batch'}), ] ) @property def snake_case_ ( self ) -> List[str]: return 1e-5 @property def snake_case_ ( self ) -> Dict: return 12
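# Sketch: building the config above around a non-timm ResNet backbone, as the
# `use_timm_backbone=False` branch expects.
from transformers import ResNetConfig, TableTransformerConfig

backbone = ResNetConfig(out_features=["stage4"])
config = TableTransformerConfig(use_timm_backbone=False, backbone_config=backbone)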
119
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Jaccard similarity |A ∩ B| / |A ∪ B| for sets, lists or tuples.

    With ``alternative_union=True`` the denominator is |A| + |B| instead.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
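# Worked values for jaccard_similarity above: |A ∩ B| = 3, |A ∪ B| = 8,
# and |A| + |B| = 11 under the alternative union.
assert jaccard_similarity({"a", "b", "c", "d", "e"}, {"c", "d", "e", "f", "h", "i"}) == 3 / 8
assert jaccard_similarity({"a", "b", "c", "d", "e"}, {"c", "d", "e", "f", "h", "i"}, alternative_union=True) == 3 / 11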
79
0
import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _SCREAMING_SNAKE_CASE : @staticmethod def SCREAMING_SNAKE_CASE_( *lowercase , **lowercase ) -> Optional[Any]: pass @is_pipeline_test @require_vision @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): lowerCAmelCase__ = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase ) -> int: lowerCamelCase_ = pipeline( "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" ) lowerCamelCase_ = [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ] return object_detector, examples def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase ) -> Dict: lowerCamelCase_ = object_detector(examples[0] , threshold=0.0 ) lowerCamelCase_ = len(lowercase ) self.assertGreater(lowercase , 0 ) self.assertEqual( lowercase , [ { "score": ANY(lowercase ), "label": ANY(lowercase ), "box": {"xmin": ANY(lowercase ), "ymin": ANY(lowercase ), "xmax": ANY(lowercase ), "ymax": ANY(lowercase )}, } for i in range(lowercase ) ] , ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF" ) def SCREAMING_SNAKE_CASE_( self ) -> Tuple: pass @require_torch def SCREAMING_SNAKE_CASE_( self ) -> str: lowerCamelCase_ = pipeline( "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" ) lowerCamelCase_ = object_detector( "./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.6_4 , ) self.assertEqual( nested_simplify(lowercase , decimals=4 ) , [ {"score": 0.7_2_3_5, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.7_2_1_8, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.7_1_8_4, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.6_7_4_8, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6_6_5_6, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6_6_1_4, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6_4_5_6, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, {"score": 0.6_4_2, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}}, {"score": 0.6_4_1_9, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, ] , ) lowerCamelCase_ = object_detector( [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ] , threshold=0.6_4 , ) self.assertEqual( nested_simplify(lowercase , decimals=4 ) , [ [ {"score": 0.7_2_3_5, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.7_2_1_8, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.7_1_8_4, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.6_7_4_8, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6_6_5_6, "label": "cat", "box": 
{"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6_6_1_4, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6_4_5_6, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, {"score": 0.6_4_2, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}}, {"score": 0.6_4_1_9, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, ] ] , ) @require_torch @slow def SCREAMING_SNAKE_CASE_( self ) -> int: lowerCamelCase_ = pipeline("zero-shot-object-detection" ) lowerCamelCase_ = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , ) self.assertEqual( nested_simplify(lowercase , decimals=4 ) , [ {"score": 0.2_8_6_8, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.2_7_7, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.2_5_3_7, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, {"score": 0.1_4_7_4, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}}, {"score": 0.1_2_0_8, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}}, ] , ) lowerCamelCase_ = object_detector( [ { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, ] , ) self.assertEqual( nested_simplify(lowercase , decimals=4 ) , [ [ {"score": 0.2_8_6_8, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.2_7_7, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.2_5_3_7, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, {"score": 0.1_4_7_4, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}}, {"score": 0.1_2_0_8, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}}, ], [ {"score": 0.2_8_6_8, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.2_7_7, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.2_5_3_7, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, {"score": 0.1_4_7_4, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}}, {"score": 0.1_2_0_8, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}}, ], ] , ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF" ) def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]: pass @require_torch @slow def SCREAMING_SNAKE_CASE_( self ) -> List[str]: lowerCamelCase_ = 0.2 lowerCamelCase_ = pipeline("zero-shot-object-detection" ) lowerCamelCase_ = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=lowercase , ) self.assertEqual( nested_simplify(lowercase , decimals=4 ) , [ {"score": 0.2_8_6_8, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.2_7_7, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.2_5_3_7, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, ] , ) @require_torch @slow def SCREAMING_SNAKE_CASE_( self ) -> Tuple: lowerCamelCase_ = 2 lowerCamelCase_ = 
pipeline("zero-shot-object-detection" ) lowerCamelCase_ = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=lowercase , ) self.assertEqual( nested_simplify(lowercase , decimals=4 ) , [ {"score": 0.2_8_6_8, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.2_7_7, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, ] , )
47
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict = defaultdict(int)

    # For each character in input strings,
    # increment count in the corresponding
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
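# Quick checks for check_anagrams above; case and whitespace are ignored.
assert check_anagrams("Silent", "Listen")
assert check_anagrams("New York Times", "monkeys write")
assert not check_anagrams("This is a string", "This is not a string")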
47
1
def nor_gate(input_1: int, input_2: int) -> int:
    """NOR outputs 1 only when both inputs are 0."""
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|    0    |    0    |   {nor_gate(0, 0)}    |")
    print(f"|    0    |    1    |   {nor_gate(0, 1)}    |")
    print(f"|    1    |    0    |   {nor_gate(1, 0)}    |")
    print(f"|    1    |    1    |   {nor_gate(1, 1)}    |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
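# Cross-check for nor_gate above via De Morgan: NOR(a, b) == NOT(a OR b).
assert all(nor_gate(a, b) == int(not (a or b)) for a in (0, 1) for b in (0, 1))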
274
import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging A : Dict = logging.get_logger(__name__) def __lowerCamelCase ( __a :int=None , __a :Optional[Any]=None ) -> int: """simple docstring""" return field(default_factory=lambda: default , metadata=__a ) @dataclass class A : '''simple docstring''' __lowerCamelCase : List[str] = list_field( default=[] , metadata={ '''help''': ( '''Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version''' ''' of all available models''' ) } , ) __lowerCamelCase : List[int] = list_field( default=[8] , metadata={'''help''': '''List of batch sizes for which memory and time performance will be evaluated'''} ) __lowerCamelCase : List[int] = list_field( default=[8, 32, 128, 512] , metadata={'''help''': '''List of sequence lengths for which memory and time performance will be evaluated'''} , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to benchmark inference of model. Inference can be disabled via --no-inference.'''} , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'''} , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Use FP16 to accelerate inference.'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Benchmark training of model'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Verbose memory tracing'''} ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'''} , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={ '''help''': '''Whether to perform memory measurements. Memory measurements can be disabled via --no-memory''' } , ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Trace memory line by line'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Save result to a CSV file'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Save all print statements in a log file'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to print environment information'''} ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={ '''help''': ( '''Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use''' ''' multiprocessing for accurate CPU and GPU memory measurements. 
This option should only be disabled''' ''' for debugging / testing and on TPU.''' ) } , ) __lowerCamelCase : str = field( default=F'''inference_time_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving time results to csv.'''} , ) __lowerCamelCase : str = field( default=F'''inference_memory_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving memory results to csv.'''} , ) __lowerCamelCase : str = field( default=F'''train_time_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving time results to csv for training.'''} , ) __lowerCamelCase : str = field( default=F'''train_memory_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving memory results to csv for training.'''} , ) __lowerCamelCase : str = field( default=F'''env_info_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving environment information.'''} , ) __lowerCamelCase : str = field( default=F'''log_{round(time() )}.csv''' , metadata={'''help''': '''Log filename used if print statements are saved in log.'''} , ) __lowerCamelCase : int = field(default=3 , metadata={'''help''': '''Times an experiment will be run.'''} ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={ '''help''': ( '''Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain''' ''' model weights.''' ) } , ) def a_ ( self : Dict ) -> Union[str, Any]: """simple docstring""" warnings.warn( f'The class {self.__class__} is deprecated. Hugging Face Benchmarking utils' """ are deprecated in general and it is advised to use external Benchmarking libraries """ """ to benchmark Transformer models.""" , __lowerCAmelCase , ) def a_ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" return json.dumps(dataclasses.asdict(self ) , indent=2 ) @property def a_ ( self : Tuple ) -> List[str]: """simple docstring""" if len(self.models ) <= 0: raise ValueError( """Please make sure you provide at least one model name / model identifier, *e.g.* `--models""" """ bert-base-cased` or `args.models = ['bert-base-cased'].""" ) return self.models @property def a_ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" if not self.multi_process: return False elif self.is_tpu: logger.info("""Multiprocessing is currently not possible on TPU.""" ) return False else: return True
274
1
"""simple docstring""" import re def __lowerCamelCase ( __UpperCamelCase ) -> bool: """simple docstring""" lowerCAmelCase_ : Optional[Any] = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$" ) if match := re.search(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): return match.string == phone return False if __name__ == "__main__": print(indian_phone_validator("""+918827897895"""))
367
"""simple docstring""" from pathlib import Path import numpy as np from PIL import Image def __lowerCamelCase ( __UpperCamelCase ) -> np.ndarray: """simple docstring""" lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : int = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.29_89 * r + 0.58_70 * g + 0.11_40 * b def __lowerCamelCase ( __UpperCamelCase ) -> np.ndarray: """simple docstring""" return (gray > 127) & (gray <= 255) def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase ) -> np.ndarray: """simple docstring""" lowerCAmelCase_ : List[str] = np.zeros_like(__UpperCamelCase ) lowerCAmelCase_ : Dict = np.zeros( (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) ) # Copy image to padded image lowerCAmelCase_ : List[Any] = image # Iterate over image & apply kernel for x in range(image.shape[1] ): for y in range(image.shape[0] ): lowerCAmelCase_ : List[str] = ( kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]] ).sum() lowerCAmelCase_ : int = int(summation > 0 ) return output if __name__ == "__main__": # read original image lowercase__ = Path(__file__).resolve().parent / """image_data""" / """lena.jpg""" lowercase__ = np.array(Image.open(lena_path)) # kernel to be applied lowercase__ = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) lowercase__ = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element) # Save the output image lowercase__ = Image.fromarray(output).convert("""RGB""") pil_img.save("""result_dilation.png""")
161
0
import os import unittest from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer from ...test_tokenization_common import TokenizerTesterMixin class _SCREAMING_SNAKE_CASE ( snake_case_ , unittest.TestCase ): lowerCAmelCase__ = PhobertTokenizer lowerCAmelCase__ = False def SCREAMING_SNAKE_CASE_( self ) -> List[str]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCamelCase_ = ["T@@", "i", "I", "R@@", "r", "e@@"] lowerCamelCase_ = dict(zip(lowercase , range(len(lowercase ) ) ) ) lowerCamelCase_ = ["#version: 0.2", "l à</w>"] lowerCamelCase_ = {"unk_token": "<unk>"} lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: for token in vocab_tokens: fp.write(f'{token} {vocab_tokens[token]}\n' ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(lowercase ) ) def SCREAMING_SNAKE_CASE_( self , **lowercase ) -> List[str]: kwargs.update(self.special_tokens_map ) return PhobertTokenizer.from_pretrained(self.tmpdirname , **lowercase ) def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Optional[Any]: lowerCamelCase_ = "Tôi là VinAI Research" lowerCamelCase_ = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>" return input_text, output_text def SCREAMING_SNAKE_CASE_( self ) -> Any: lowerCamelCase_ = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) lowerCamelCase_ = "Tôi là VinAI Research" lowerCamelCase_ = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split() lowerCamelCase_ = tokenizer.tokenize(lowercase ) print(lowercase ) self.assertListEqual(lowercase , lowercase ) lowerCamelCase_ = tokens + [tokenizer.unk_token] lowerCamelCase_ = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , lowercase )
19
def exchange_sort(numbers: list[int]) -> list[int]:
    """
    Sort a list of numbers in place by exchanging out-of-order pairs.
    >>> exchange_sort([5, 4, 3, 2, 1])
    [1, 2, 3, 4, 5]
    >>> exchange_sort([5])
    [5]
    """
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
133
0
from __future__ import annotations


def ceil_index(v: list[int], left: int, right: int, key: int) -> int:
    """Binary-search for the leftmost index in v[left..right] whose value is >= key."""
    while right - left > 1:
        middle = (left + right) // 2
        if v[middle] >= key:
            right = middle
        else:
            left = middle
    return right


def longest_increasing_subsequence_length(v: list[int]) -> int:
    """
    >>> longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6])
    6
    >>> longest_increasing_subsequence_length([])
    0
    """
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value: it starts a fresh candidate subsequence
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest candidate found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling element of an existing candidate
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length


if __name__ == "__main__":
    import doctest

    doctest.testmod()
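# Illustrative usage sketch (an addition, not part of the original row); names follow the
# reconstructed functions above. Each element triggers at most one binary search over
# `tail`, so the whole pass is O(n log n) versus O(n^2) for the textbook DP formulation.
if __name__ == "__main__":
    assert longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6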
368
import unittest from diffusers.models.unet_ad_blocks import * # noqa F403 from diffusers.utils import torch_device from .test_unet_blocks_common import UNetBlockTesterMixin class __snake_case ( __lowerCamelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = DownBlockaD # noqa F405 lowerCAmelCase__ = """down""" def UpperCAmelCase__ ( self : Any ): __snake_case: str = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904] super().test_output(A ) class __snake_case ( __lowerCamelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = ResnetDownsampleBlockaD # noqa F405 lowerCAmelCase__ = """down""" def UpperCAmelCase__ ( self : Optional[int] ): __snake_case: Union[str, Any] = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948] super().test_output(A ) class __snake_case ( __lowerCamelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = AttnDownBlockaD # noqa F405 lowerCAmelCase__ = """down""" def UpperCAmelCase__ ( self : Any ): __snake_case: Union[str, Any] = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957] super().test_output(A ) class __snake_case ( __lowerCamelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = CrossAttnDownBlockaD # noqa F405 lowerCAmelCase__ = """down""" def UpperCAmelCase__ ( self : List[str] ): __snake_case , __snake_case: List[str] = super().prepare_init_args_and_inputs_for_common() __snake_case: List[Any] = 32 return init_dict, inputs_dict def UpperCAmelCase__ ( self : Optional[Any] ): __snake_case: Optional[Any] = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983] super().test_output(A ) class __snake_case ( __lowerCamelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = SimpleCrossAttnDownBlockaD # noqa F405 lowerCAmelCase__ = """down""" @property def UpperCAmelCase__ ( self : Tuple ): return super().get_dummy_input(include_encoder_hidden_states=A ) def UpperCAmelCase__ ( self : int ): __snake_case , __snake_case: Union[str, Any] = super().prepare_init_args_and_inputs_for_common() __snake_case: Optional[Any] = 32 return init_dict, inputs_dict @unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" ) def UpperCAmelCase__ ( self : List[Any] ): __snake_case: Optional[Any] = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338] super().test_output(A ) class __snake_case ( __lowerCamelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = SkipDownBlockaD # noqa F405 lowerCAmelCase__ = """down""" @property def UpperCAmelCase__ ( self : Any ): return super().get_dummy_input(include_skip_sample=A ) def UpperCAmelCase__ ( self : Any ): __snake_case: Optional[Any] = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069] super().test_output(A ) class __snake_case ( __lowerCamelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = AttnSkipDownBlockaD # noqa F405 lowerCAmelCase__ = """down""" @property def UpperCAmelCase__ ( self : List[Any] ): return super().get_dummy_input(include_skip_sample=A ) def UpperCAmelCase__ ( self : int ): __snake_case: str = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642] super().test_output(A ) class __snake_case ( __lowerCamelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = DownEncoderBlockaD # noqa F405 lowerCAmelCase__ = """down""" @property def UpperCAmelCase__ ( self : Union[str, Any] ): return 
super().get_dummy_input(include_temb=A ) def UpperCAmelCase__ ( self : Optional[int] ): __snake_case: str = { """in_channels""": 32, """out_channels""": 32, } __snake_case: Dict = self.dummy_input return init_dict, inputs_dict def UpperCAmelCase__ ( self : str ): __snake_case: Optional[int] = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626] super().test_output(A ) class __snake_case ( __lowerCamelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = AttnDownEncoderBlockaD # noqa F405 lowerCAmelCase__ = """down""" @property def UpperCAmelCase__ ( self : List[str] ): return super().get_dummy_input(include_temb=A ) def UpperCAmelCase__ ( self : Optional[int] ): __snake_case: Optional[Any] = { """in_channels""": 32, """out_channels""": 32, } __snake_case: Tuple = self.dummy_input return init_dict, inputs_dict def UpperCAmelCase__ ( self : Optional[int] ): __snake_case: Dict = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538] super().test_output(A ) class __snake_case ( __lowerCamelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = UNetMidBlockaD # noqa F405 lowerCAmelCase__ = """mid""" def UpperCAmelCase__ ( self : str ): __snake_case: Optional[int] = { """in_channels""": 32, """temb_channels""": 128, } __snake_case: List[str] = self.dummy_input return init_dict, inputs_dict def UpperCAmelCase__ ( self : str ): __snake_case: Tuple = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028] super().test_output(A ) class __snake_case ( __lowerCamelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = UNetMidBlockaDCrossAttn # noqa F405 lowerCAmelCase__ = """mid""" def UpperCAmelCase__ ( self : str ): __snake_case , __snake_case: int = super().prepare_init_args_and_inputs_for_common() __snake_case: int = 32 return init_dict, inputs_dict def UpperCAmelCase__ ( self : Dict ): __snake_case: Optional[Any] = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335] super().test_output(A ) class __snake_case ( __lowerCamelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = UNetMidBlockaDSimpleCrossAttn # noqa F405 lowerCAmelCase__ = """mid""" @property def UpperCAmelCase__ ( self : Optional[int] ): return super().get_dummy_input(include_encoder_hidden_states=A ) def UpperCAmelCase__ ( self : str ): __snake_case , __snake_case: Any = super().prepare_init_args_and_inputs_for_common() __snake_case: str = 32 return init_dict, inputs_dict def UpperCAmelCase__ ( self : Dict ): __snake_case: Optional[Any] = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880] super().test_output(A ) class __snake_case ( __lowerCamelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = UpBlockaD # noqa F405 lowerCAmelCase__ = """up""" @property def UpperCAmelCase__ ( self : Tuple ): return super().get_dummy_input(include_res_hidden_states_tuple=A ) def UpperCAmelCase__ ( self : Tuple ): __snake_case: Tuple = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523] super().test_output(A ) class __snake_case ( __lowerCamelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = ResnetUpsampleBlockaD # noqa F405 lowerCAmelCase__ = """up""" @property def UpperCAmelCase__ ( self : Tuple ): return super().get_dummy_input(include_res_hidden_states_tuple=A ) def UpperCAmelCase__ ( self : Union[str, Any] ): __snake_case: int = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244] 
super().test_output(A ) class __snake_case ( __lowerCamelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = CrossAttnUpBlockaD # noqa F405 lowerCAmelCase__ = """up""" @property def UpperCAmelCase__ ( self : Optional[int] ): return super().get_dummy_input(include_res_hidden_states_tuple=A ) def UpperCAmelCase__ ( self : Dict ): __snake_case , __snake_case: Any = super().prepare_init_args_and_inputs_for_common() __snake_case: Optional[int] = 32 return init_dict, inputs_dict def UpperCAmelCase__ ( self : Union[str, Any] ): __snake_case: List[Any] = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582] super().test_output(A ) class __snake_case ( __lowerCamelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = SimpleCrossAttnUpBlockaD # noqa F405 lowerCAmelCase__ = """up""" @property def UpperCAmelCase__ ( self : Optional[Any] ): return super().get_dummy_input(include_res_hidden_states_tuple=A , include_encoder_hidden_states=A ) def UpperCAmelCase__ ( self : Dict ): __snake_case , __snake_case: Optional[Any] = super().prepare_init_args_and_inputs_for_common() __snake_case: str = 32 return init_dict, inputs_dict def UpperCAmelCase__ ( self : List[Any] ): __snake_case: Union[str, Any] = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402] super().test_output(A ) class __snake_case ( __lowerCamelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = AttnUpBlockaD # noqa F405 lowerCAmelCase__ = """up""" @property def UpperCAmelCase__ ( self : int ): return super().get_dummy_input(include_res_hidden_states_tuple=A ) @unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" ) def UpperCAmelCase__ ( self : List[str] ): __snake_case: Optional[Any] = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033] super().test_output(A ) class __snake_case ( __lowerCamelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = SkipUpBlockaD # noqa F405 lowerCAmelCase__ = """up""" @property def UpperCAmelCase__ ( self : str ): return super().get_dummy_input(include_res_hidden_states_tuple=A ) def UpperCAmelCase__ ( self : Dict ): __snake_case: Optional[int] = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362] super().test_output(A ) class __snake_case ( __lowerCamelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = AttnSkipUpBlockaD # noqa F405 lowerCAmelCase__ = """up""" @property def UpperCAmelCase__ ( self : str ): return super().get_dummy_input(include_res_hidden_states_tuple=A ) def UpperCAmelCase__ ( self : Optional[Any] ): __snake_case: Optional[Any] = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015] super().test_output(A ) class __snake_case ( __lowerCamelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = UpDecoderBlockaD # noqa F405 lowerCAmelCase__ = """up""" @property def UpperCAmelCase__ ( self : Optional[int] ): return super().get_dummy_input(include_temb=A ) def UpperCAmelCase__ ( self : str ): __snake_case: Union[str, Any] = {"""in_channels""": 32, """out_channels""": 32} __snake_case: Dict = self.dummy_input return init_dict, inputs_dict def UpperCAmelCase__ ( self : Any ): __snake_case: Dict = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137] super().test_output(A ) class __snake_case ( __lowerCamelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = AttnUpDecoderBlockaD # noqa F405 lowerCAmelCase__ = """up""" 
@property def UpperCAmelCase__ ( self : Optional[Any] ): return super().get_dummy_input(include_temb=A ) def UpperCAmelCase__ ( self : Optional[Any] ): __snake_case: Optional[Any] = {"""in_channels""": 32, """out_channels""": 32} __snake_case: Any = self.dummy_input return init_dict, inputs_dict def UpperCAmelCase__ ( self : int ): __snake_case: Any = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568] super().test_output(A )
293
0
from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class A__ : def __init__( self , __magic_name__ , ): lowerCamelCase : Any = parent lowerCamelCase : Any = 1_3 lowerCamelCase : Dict = 7 lowerCamelCase : List[str] = 3_0 lowerCamelCase : str = self.seq_length + self.mem_len lowerCamelCase : int = 1_5 lowerCamelCase : List[Any] = True lowerCamelCase : List[str] = True lowerCamelCase : List[str] = 9_9 lowerCamelCase : Optional[int] = [1_0, 5_0, 8_0] lowerCamelCase : Dict = 3_2 lowerCamelCase : List[Any] = 3_2 lowerCamelCase : Optional[int] = 4 lowerCamelCase : Optional[Any] = 8 lowerCamelCase : Union[str, Any] = 1_2_8 lowerCamelCase : Any = 2 lowerCamelCase : List[str] = 2 lowerCamelCase : Optional[int] = None lowerCamelCase : Optional[int] = 1 lowerCamelCase : int = 0 lowerCamelCase : Tuple = 3 lowerCamelCase : List[str] = self.vocab_size - 1 lowerCamelCase : Tuple = 0.01 def UpperCamelCase__ ( self ): lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase : Union[str, Any] = None if self.use_labels: lowerCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase : Tuple = TransfoXLConfig( vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , ) return (config, input_ids_a, input_ids_a, lm_labels) def UpperCamelCase__ ( self ): random.seed(self.seed ) tf.random.set_seed(self.seed ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase : Optional[Any] = TFTransfoXLModel(__a ) lowerCamelCase : Optional[Any] = model(__a ).to_tuple() lowerCamelCase : Union[str, Any] = {'input_ids': input_ids_a, 'mems': mems_a} lowerCamelCase : Optional[Any] = model(__a ).to_tuple() self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase : Tuple = TFTransfoXLLMHeadModel(__a ) lowerCamelCase : Optional[int] = model(__a ).to_tuple() lowerCamelCase : Any = {'input_ids': input_ids_a, 'labels': lm_labels} lowerCamelCase : Any = model(__a ).to_tuple() lowerCamelCase : 
List[Any] = model([input_ids_a, mems_a] ).to_tuple() lowerCamelCase : Dict = {'input_ids': input_ids_a, 'mems': mems_a, 'labels': lm_labels} lowerCamelCase : Tuple = model(__a ).to_tuple() self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase : int = TFTransfoXLForSequenceClassification(__a ) lowerCamelCase : str = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase__ ( self ): lowerCamelCase : int = self.prepare_config_and_inputs() (lowerCamelCase) : str = config_and_inputs lowerCamelCase : Optional[int] = {'input_ids': input_ids_a} return config, inputs_dict @require_tf class A__ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase): _UpperCAmelCase : Optional[int] = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) _UpperCAmelCase : int = () if is_tf_available() else () _UpperCAmelCase : Tuple = ( { """feature-extraction""": TFTransfoXLModel, """text-classification""": TFTransfoXLForSequenceClassification, """text-generation""": TFTransfoXLLMHeadModel, """zero-shot""": TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented _UpperCAmelCase : Union[str, Any] = False _UpperCAmelCase : Optional[int] = False _UpperCAmelCase : Any = False _UpperCAmelCase : Optional[Any] = False def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. 
return True return False def UpperCamelCase__ ( self ): lowerCamelCase : List[Any] = TFTransfoXLModelTester(self ) lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__a , d_embed=3_7 ) def UpperCamelCase__ ( self ): self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): self.model_tester.set_seed() lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*__a ) def UpperCamelCase__ ( self ): self.model_tester.set_seed() lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*__a ) def UpperCamelCase__ ( self ): lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*__a ) def UpperCamelCase__ ( self ): lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : str = [TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: lowerCamelCase : Tuple = model_class(__a ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class in list_other_models_with_output_ebd: lowerCamelCase : Union[str, Any] = model.get_output_embeddings() assert isinstance(__a , tf.keras.layers.Layer ) lowerCamelCase : List[str] = model.get_bias() assert name is None else: lowerCamelCase : Any = model.get_output_embeddings() assert x is None lowerCamelCase : str = model.get_bias() assert name is None def UpperCamelCase__ ( self ): pass @slow def UpperCamelCase__ ( self ): for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase : Tuple = TFTransfoXLModel.from_pretrained(__a ) self.assertIsNotNone(__a ) @unittest.skip(reason="""This model doesn\'t play well with fit() due to not returning a single loss.""" ) def UpperCamelCase__ ( self ): pass @require_tf class A__ ( unittest.TestCase): @unittest.skip("""Skip test until #12651 is resolved.""" ) @slow def UpperCamelCase__ ( self ): lowerCamelCase : Dict = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" ) # fmt: off lowerCamelCase : List[Any] = tf.convert_to_tensor([[3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0]] , dtype=tf.intaa ) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . 
Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . <eod> </s> <eos> # fmt: off lowerCamelCase : List[Any] = [3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0,3_3,1,1_8_5_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_8,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> lowerCamelCase : Optional[Any] = model.generate(__a , max_length=2_0_0 , do_sample=__a ) self.assertListEqual(output_ids[0].numpy().tolist() , __a )
287
import importlib
import os
import sys


# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")


def get_module_path(test_file):
    """Return the module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    """Get the module of a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """Get all classes in a model test file whose names end with `ModelTester`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Get all test classes in a model test file that have a non-empty `all_model_classes`."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Get all model classes that appear in `all_model_classes` of the test classes."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Get the model tester class of a model test class."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Get all test classes in `test_file` that have `model_class` in their `all_model_classes`."""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Get all model tester classes in `test_file` that are associated to `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Get a mapping from test classes to model tester classes in `test_file`."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Get a mapping from model classes to test classes in `test_file`."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Get a mapping from model classes to model tester classes in `test_file`."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Make the information succinct and easy to read (class objects become their names)."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
27
0
import argparse

import torch

from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Copy/tweak a ParlAI Blenderbot checkpoint into the Hugging Face structure."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
356
from __future__ import annotations


def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all the valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check if the board (matrix) has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Helper function to solve the knight tour problem by backtracking."""
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position

        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find a solution for the knight tour problem for a board of size n, or raise ValueError."""
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
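# Illustrative usage sketch (an addition, not part of the original row): open_knight_tour(5)
# returns a 5x5 board whose cells record the visit order 1..25; square boards of size 2-4
# admit no open tour, so the helper backtracks through every start square and the function
# raises ValueError for those sizes.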
62
0
import unittest

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device


if is_torch_available():
    import torch

    from transformers import AutoModelForImageClassification

if is_vision_available():
    from transformers import AutoImageProcessor


@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4_158, -0.4_092, -0.4_347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
279
import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand lowerCAmelCase_ = ( '''4S 3H 2C 7S 5H''', '''9D 8H 2C 6S 7H''', '''2D 6D 9D TH 7D''', '''TC 8C 2S JH 6C''', '''JH 8S TH AH QH''', '''TS KS 5S 9S AC''', '''KD 6S 9D TH AD''', '''KS 8D 4D 9S 4S''', # pair '''8C 4S KH JS 4D''', # pair '''QH 8H KD JH 8S''', # pair '''KC 4H KS 2H 8D''', # pair '''KD 4S KC 3H 8S''', # pair '''AH 8S AS KC JH''', # pair '''3H 4C 4H 3S 2H''', # 2 pairs '''5S 5D 2C KH KH''', # 2 pairs '''3C KH 5D 5S KH''', # 2 pairs '''AS 3C KH AD KH''', # 2 pairs '''7C 7S 3S 7H 5S''', # 3 of a kind '''7C 7S KH 2H 7H''', # 3 of a kind '''AC KH QH AH AS''', # 3 of a kind '''2H 4D 3C AS 5S''', # straight (low ace) '''3C 5C 4C 2C 6H''', # straight '''6S 8S 7S 5H 9H''', # straight '''JS QS 9H TS KH''', # straight '''QC KH TS JS AH''', # straight (high ace) '''8C 9C 5C 3C TC''', # flush '''3S 8S 9S 5S KS''', # flush '''4C 5C 9C 8C KC''', # flush '''JH 8H AH KH QH''', # flush '''3D 2H 3H 2C 2D''', # full house '''2H 2C 3S 3H 3D''', # full house '''KH KC 3S 3H 3D''', # full house '''JC 6H JS JD JH''', # 4 of a kind '''JC 7H JS JD JH''', # 4 of a kind '''JC KH JS JD JH''', # 4 of a kind '''2S AS 4S 5S 3S''', # straight flush (low ace) '''2D 6D 3D 4D 5D''', # straight flush '''5C 6C 3C 7C 4C''', # straight flush '''JH 9H TH KH QH''', # straight flush '''JH AH TH KH QH''', # royal flush (high ace straight flush) ) lowerCAmelCase_ = ( ('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''), ('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''), ('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''), ('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''), ('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''), ('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''), ('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''), ('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''), ('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''), ('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''), ('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''), ('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''), ('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''), ('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''), ('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''), ('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''), ('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''), ('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''), ('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''), ('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''), ('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''), ('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''), ('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''), ('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''), ('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''), ('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''), ('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''), ('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''), ('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''), ('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''), ('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''), ('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''), ('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''), ) lowerCAmelCase_ = ( ('''2H 3H 4H 5H 6H''', True), ('''AS AH 2H AD AC''', False), ('''2H 3H 5H 6H 7H''', True), ('''KS AS TS QS JS''', True), ('''8H 9H QS JS TH''', False), ('''AS 3S 4S 
8S 2S''', True), ) lowerCAmelCase_ = ( ('''2H 3H 4H 5H 6H''', True), ('''AS AH 2H AD AC''', False), ('''2H 3H 5H 6H 7H''', False), ('''KS AS TS QS JS''', True), ('''8H 9H QS JS TH''', True), ) lowerCAmelCase_ = ( ('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 1_4]), ('''2H 5D 3C AS 5S''', False, [1_4, 5, 5, 3, 2]), ('''JH QD KC AS TS''', False, [1_4, 1_3, 1_2, 1_1, 1_0]), ('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]), ) lowerCAmelCase_ = ( ('''JH AH TH KH QH''', 0), ('''JH 9H TH KH QH''', 0), ('''JC KH JS JD JH''', 7), ('''KH KC 3S 3H 3D''', 6), ('''8C 9C 5C 3C TC''', 0), ('''JS QS 9H TS KH''', 0), ('''7C 7S KH 2H 7H''', 3), ('''3C KH 5D 5S KH''', 2), ('''QH 8H KD JH 8S''', 1), ('''2D 6D 9D TH 7D''', 0), ) lowerCAmelCase_ = ( ('''JH AH TH KH QH''', 2_3), ('''JH 9H TH KH QH''', 2_2), ('''JC KH JS JD JH''', 2_1), ('''KH KC 3S 3H 3D''', 2_0), ('''8C 9C 5C 3C TC''', 1_9), ('''JS QS 9H TS KH''', 1_8), ('''7C 7S KH 2H 7H''', 1_7), ('''3C KH 5D 5S KH''', 1_6), ('''QH 8H KD JH 8S''', 1_5), ('''2D 6D 9D TH 7D''', 1_4), ) def lowerCamelCase_ ( ) -> Dict: """simple docstring""" snake_case_ , snake_case_ : Any = randrange(len(_UpperCamelCase ) ), randrange(len(_UpperCamelCase ) ) snake_case_ : Any = ['''Loss''', '''Tie''', '''Win'''][(play >= oppo) + (play > oppo)] snake_case_ , snake_case_ : Tuple = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def lowerCamelCase_ ( _UpperCamelCase = 100 ) -> str: """simple docstring""" return (generate_random_hand() for _ in range(_UpperCamelCase )) @pytest.mark.parametrize('''hand, expected''' , _UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> int: """simple docstring""" assert PokerHand(_UpperCamelCase )._is_flush() == expected @pytest.mark.parametrize('''hand, expected''' , _UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Optional[int]: """simple docstring""" assert PokerHand(_UpperCamelCase )._is_straight() == expected @pytest.mark.parametrize('''hand, expected, card_values''' , _UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Tuple: """simple docstring""" snake_case_ : str = PokerHand(_UpperCamelCase ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize('''hand, expected''' , _UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> int: """simple docstring""" assert PokerHand(_UpperCamelCase )._is_same_kind() == expected @pytest.mark.parametrize('''hand, expected''' , _UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Optional[int]: """simple docstring""" assert PokerHand(_UpperCamelCase )._hand_type == expected @pytest.mark.parametrize('''hand, other, expected''' , _UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any: """simple docstring""" assert PokerHand(_UpperCamelCase ).compare_with(PokerHand(_UpperCamelCase ) ) == expected @pytest.mark.parametrize('''hand, other, expected''' , generate_random_hands() ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]: """simple docstring""" assert PokerHand(_UpperCamelCase ).compare_with(PokerHand(_UpperCamelCase ) ) == expected def lowerCamelCase_ ( ) -> str: """simple docstring""" snake_case_ : Dict = [PokerHand(_UpperCamelCase ) for hand in SORTED_HANDS] snake_case_ : str = poker_hands.copy() shuffle(_UpperCamelCase ) snake_case_ : List[str] = chain(sorted(_UpperCamelCase ) ) for index, 
hand in enumerate(_UpperCamelCase ): assert hand == poker_hands[index] def lowerCamelCase_ ( ) -> Dict: """simple docstring""" snake_case_ : Union[str, Any] = [PokerHand('''2D AC 3H 4H 5S''' ), PokerHand('''2S 3H 4H 5S 6C''' )] pokerhands.sort(reverse=_UpperCamelCase ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def lowerCamelCase_ ( ) -> str: """simple docstring""" snake_case_ : Dict = PokerHand('''2C 4S AS 3D 5C''' ) snake_case_ : str = True snake_case_ : Tuple = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def lowerCamelCase_ ( ) -> List[str]: """simple docstring""" snake_case_ : List[str] = 0 snake_case_ : Union[str, Any] = os.path.abspath(os.path.dirname(_UpperCamelCase ) ) snake_case_ : Dict = os.path.join(_UpperCamelCase , '''poker_hands.txt''' ) with open(_UpperCamelCase ) as file_hand: for line in file_hand: snake_case_ : Dict = line[:14].strip() snake_case_ : List[str] = line[15:].strip() snake_case_ , snake_case_ : str = PokerHand(_UpperCamelCase ), PokerHand(_UpperCamelCase ) snake_case_ : int = player.compare_with(_UpperCamelCase ) if output == "Win": answer += 1 assert answer == 376
279
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __A = { "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"], "tokenization_ctrl": ["CTRLTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST", "CTRLForSequenceClassification", "CTRLLMHeadModel", "CTRLModel", "CTRLPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST", "TFCTRLForSequenceClassification", "TFCTRLLMHeadModel", "TFCTRLModel", "TFCTRLPreTrainedModel", ] if TYPE_CHECKING: from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig from .tokenization_ctrl import CTRLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ctrl import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_ctrl import ( TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLForSequenceClassification, TFCTRLLMHeadModel, TFCTRLModel, TFCTRLPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
108
"""simple docstring""" import gc import inspect import unittest import torch from parameterized import parameterized from diffusers import PriorTransformer from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin enable_full_determinism() class snake_case ( __snake_case, unittest.TestCase ): SCREAMING_SNAKE_CASE_ : Tuple = PriorTransformer SCREAMING_SNAKE_CASE_ : List[str] = """hidden_states""" @property def lowercase_ ( self : Dict)-> str: '''simple docstring''' __lowerCAmelCase: str = 4 __lowerCAmelCase: int = 8 __lowerCAmelCase: int = 7 __lowerCAmelCase: str = floats_tensor((batch_size, embedding_dim)).to(UpperCamelCase__) __lowerCAmelCase: Optional[Any] = floats_tensor((batch_size, embedding_dim)).to(UpperCamelCase__) __lowerCAmelCase: Any = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(UpperCamelCase__) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def lowercase_ ( self : Optional[int] , UpperCamelCase__ : str=0)-> str: '''simple docstring''' torch.manual_seed(UpperCamelCase__) __lowerCAmelCase: List[Any] = 4 __lowerCAmelCase: Dict = 8 __lowerCAmelCase: int = 7 __lowerCAmelCase: List[str] = torch.randn((batch_size, embedding_dim)).to(UpperCamelCase__) __lowerCAmelCase: Tuple = torch.randn((batch_size, embedding_dim)).to(UpperCamelCase__) __lowerCAmelCase: List[Any] = torch.randn((batch_size, num_embeddings, embedding_dim)).to(UpperCamelCase__) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } @property def lowercase_ ( self : Dict)-> List[Any]: '''simple docstring''' return (4, 8) @property def lowercase_ ( self : Optional[int])-> int: '''simple docstring''' return (4, 8) def lowercase_ ( self : Optional[int])-> Tuple: '''simple docstring''' __lowerCAmelCase: str = { "num_attention_heads": 2, "attention_head_dim": 4, "num_layers": 2, "embedding_dim": 8, "num_embeddings": 7, "additional_embeddings": 4, } __lowerCAmelCase: Any = self.dummy_input return init_dict, inputs_dict def lowercase_ ( self : List[Any])-> int: '''simple docstring''' __lowerCAmelCase , __lowerCAmelCase: Optional[int] = PriorTransformer.from_pretrained( "hf-internal-testing/prior-dummy" , output_loading_info=UpperCamelCase__) self.assertIsNotNone(UpperCamelCase__) self.assertEqual(len(loading_info["missing_keys"]) , 0) model.to(UpperCamelCase__) __lowerCAmelCase: Dict = model(**self.dummy_input)[0] assert hidden_states is not None, "Make sure output is not None" def lowercase_ ( self : List[str])-> Tuple: '''simple docstring''' __lowerCAmelCase , __lowerCAmelCase: Optional[Any] = self.prepare_init_args_and_inputs_for_common() __lowerCAmelCase: Tuple = self.model_class(**UpperCamelCase__) __lowerCAmelCase: List[str] = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCAmelCase: List[Any] = [*signature.parameters.keys()] __lowerCAmelCase: Any = ["hidden_states", "timestep"] self.assertListEqual(arg_names[:2] , UpperCamelCase__) def lowercase_ ( self : Optional[int])-> List[str]: '''simple docstring''' __lowerCAmelCase: int = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy") __lowerCAmelCase: Union[str, Any] = model.to(UpperCamelCase__) if hasattr(UpperCamelCase__ , "set_default_attn_processor"): 
model.set_default_attn_processor() __lowerCAmelCase: str = self.get_dummy_seed_input() with torch.no_grad(): __lowerCAmelCase: Dict = model(**UpperCamelCase__)[0] __lowerCAmelCase: Dict = output[0, :5].flatten().cpu() print(UpperCamelCase__) # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. __lowerCAmelCase: List[str] = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239]) self.assertTrue(torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1e-2)) @slow class snake_case ( unittest.TestCase ): def lowercase_ ( self : int , UpperCamelCase__ : Dict=1 , UpperCamelCase__ : str=7_6_8 , UpperCamelCase__ : int=7_7 , UpperCamelCase__ : Any=0)-> Union[str, Any]: '''simple docstring''' torch.manual_seed(UpperCamelCase__) __lowerCAmelCase: List[Any] = batch_size __lowerCAmelCase: Any = embedding_dim __lowerCAmelCase: Dict = num_embeddings __lowerCAmelCase: Dict = torch.randn((batch_size, embedding_dim)).to(UpperCamelCase__) __lowerCAmelCase: str = torch.randn((batch_size, embedding_dim)).to(UpperCamelCase__) __lowerCAmelCase: int = torch.randn((batch_size, num_embeddings, embedding_dim)).to(UpperCamelCase__) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def lowercase_ ( self : List[Any])-> Union[str, Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @parameterized.expand( [ # fmt: off [1_3, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]], [3_7, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]], # fmt: on ]) def lowercase_ ( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int)-> List[Any]: '''simple docstring''' __lowerCAmelCase: List[str] = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior" , subfolder="prior") model.to(UpperCamelCase__) __lowerCAmelCase: Dict = self.get_dummy_seed_input(seed=UpperCamelCase__) with torch.no_grad(): __lowerCAmelCase: Optional[Any] = model(**UpperCamelCase__)[0] assert list(sample.shape) == [1, 7_6_8] __lowerCAmelCase: Dict = sample[0, :8].flatten().cpu() print(UpperCamelCase__) __lowerCAmelCase: Union[str, Any] = torch.tensor(UpperCamelCase__) assert torch_all_close(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3)
108
1
def is_subset_sum(arr: list[int], required_sum: int) -> bool:
    """Return True if some subset of `arr` sums to `required_sum`, using bottom-up DP."""
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # For each arr prefix, a sum of zero can be formed by not taking any element,
    # hence True.
    for i in range(arr_len + 1):
        subset[i][0] = True

    # A non-zero sum cannot be formed when the set is empty.
    for j in range(1, required_sum + 1):
        subset[0][j] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                # The current element is too large to use for this target sum.
                subset[i][j] = subset[i - 1][j]
            else:
                # Either skip the current element or include it.
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
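# A minimal usage sketch for the routine above; `is_subset_sum` is the
# cleaned-up name used in this snippet, and the sample values are
# illustrative only.
if __name__ == "__main__":
    example = [3, 34, 4, 12, 5, 2]
    assert is_subset_sum(example, 9)  # 4 + 5 == 9
    assert not is_subset_sum(example, 30)  # no subset of `example` sums to 30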
5
import json
import os
import re
import sys
import urllib.request

import requests
from bs4 import BeautifulSoup

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}


def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Download up to `max_images` Google Images results for `query`; return the count."""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }

    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )

    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)

    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0

    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )

    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode("unicode-escape")
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode("unicode-escape")
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index


if __name__ == "__main__":
    try:
        image_count = download_images_from_google_query(sys.argv[1])
        print(f"{image_count} images were downloaded to disk.")
    except IndexError:
        print("Please provide a search term.")
        raise
146
0
import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class A : """simple docstring""" def __init__(self , lowerCAmelCase , lowerCAmelCase=3 , lowerCAmelCase=3_2 , lowerCAmelCase=3 , lowerCAmelCase=1_0 , lowerCAmelCase=[8, 1_6, 3_2, 6_4] , lowerCAmelCase=[1, 1, 2, 1] , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase="relu" , lowerCAmelCase=3 , lowerCAmelCase=None , lowerCAmelCase=["stage2", "stage3", "stage4"] , lowerCAmelCase=[2, 3, 4] , lowerCAmelCase=1 , ): __lowercase= parent __lowercase= batch_size __lowercase= image_size __lowercase= num_channels __lowercase= embeddings_size __lowercase= hidden_sizes __lowercase= depths __lowercase= is_training __lowercase= use_labels __lowercase= hidden_act __lowercase= num_labels __lowercase= scope __lowercase= len(_a ) __lowercase= out_features __lowercase= out_indices __lowercase= num_groups def _A (self ): __lowercase= floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowercase= None if self.use_labels: __lowercase= ids_tensor([self.batch_size] , self.num_labels ) __lowercase= self.get_config() return config, pixel_values, labels def _A (self ): return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= BitModel(config=_a ) model.to(_a ) model.eval() __lowercase= model(_a ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= self.num_labels __lowercase= BitForImageClassification(_a ) model.to(_a ) model.eval() __lowercase= model(_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= BitBackbone(config=_a ) model.to(_a ) model.eval() __lowercase= model(_a ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None __lowercase= None __lowercase= BitBackbone(config=_a ) model.to(_a ) model.eval() __lowercase= model(_a ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) 
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def _A (self ): __lowercase= self.prepare_config_and_inputs() __lowercase= config_and_inputs __lowercase= {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class A ( __lowercase , __lowercase , unittest.TestCase ): """simple docstring""" UpperCamelCase_ : List[str] =(BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () UpperCamelCase_ : Optional[Any] =( {"feature-extraction": BitModel, "image-classification": BitForImageClassification} if is_torch_available() else {} ) UpperCamelCase_ : Any =False UpperCamelCase_ : Any =False UpperCamelCase_ : Any =False UpperCamelCase_ : List[Any] =False UpperCamelCase_ : str =False def _A (self ): __lowercase= BitModelTester(self ) __lowercase= ConfigTester(self , config_class=_a , has_text_modality=_a ) def _A (self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _A (self ): return @unittest.skip(reason='Bit does not output attentions' ) def _A (self ): pass @unittest.skip(reason='Bit does not use inputs_embeds' ) def _A (self ): pass @unittest.skip(reason='Bit does not support input and output embeddings' ) def _A (self ): pass def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase= model_class(_a ) __lowercase= inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase= [*signature.parameters.keys()] __lowercase= ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _a ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_a ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase= model_class(config=_a ) for name, module in model.named_modules(): if isinstance(_a , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) def _A (self ): def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= model_class(_a ) model.to(_a ) model.eval() with torch.no_grad(): __lowercase= model(**self._prepare_for_class(_a , _a ) ) __lowercase= outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __lowercase= self.model_tester.num_stages self.assertEqual(len(_a ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, 
self.model_tester.image_size // 4] , ) __lowercase= self.model_tester.prepare_config_and_inputs_for_common() __lowercase= ['''preactivation''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: __lowercase= layer_type __lowercase= True check_hidden_states_output(_a , _a , _a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowercase= True check_hidden_states_output(_a , _a , _a ) @unittest.skip(reason='Bit does not use feedforward chunking' ) def _A (self ): pass def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_a ) @slow def _A (self ): for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase= BitModel.from_pretrained(_a ) self.assertIsNotNone(_a ) def _lowerCamelCase( ) -> Dict: '''simple docstring''' __lowercase= Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class A ( unittest.TestCase ): """simple docstring""" @cached_property def _A (self ): return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _A (self ): __lowercase= BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_a ) __lowercase= self.default_image_processor __lowercase= prepare_img() __lowercase= image_processor(images=_a , return_tensors='pt' ).to(_a ) # forward pass with torch.no_grad(): __lowercase= model(**_a ) # verify the logits __lowercase= torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , _a ) __lowercase= torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(_a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) ) @require_torch class A ( __lowercase , unittest.TestCase ): """simple docstring""" UpperCamelCase_ : str =(BitBackbone,) if is_torch_available() else () UpperCamelCase_ : Union[str, Any] =BitConfig UpperCamelCase_ : List[Any] =False def _A (self ): __lowercase= BitModelTester(self )
365
from math import factorial, radians


def maclaurin_sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate sin(angle_in_degrees) with a truncated Maclaurin series."""
    # Wrap the angle into [0, 360) degrees so the series converges quickly.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(result, rounded_values_count)


if __name__ == "__main__":
    __import__("doctest").testmod()
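# A quick sanity check for the series above, assuming the cleaned-up name
# `maclaurin_sin`; it compares the truncated series against math.sin.
if __name__ == "__main__":
    import math

    for deg in (0.0, 30.0, 90.0, 270.0):
        series = maclaurin_sin(deg)
        exact = round(math.sin(math.radians(deg)), 10)
        assert abs(series - exact) < 1e-6, (deg, series, exact)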
304
0
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType


logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}


class DebertaVaConfig(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaVaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
124
def mean_absolute_deviation(nums: list[float]) -> float:
    """Return the mean absolute deviation of a non-empty list of numbers."""
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
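# A short illustrative check for the helper above, using the cleaned-up name
# `mean_absolute_deviation`; the data values are made up for the example.
if __name__ == "__main__":
    data = [2.0, 4.0, 6.0, 8.0]  # average is 5.0
    assert mean_absolute_deviation(data) == 2.0  # (3 + 1 + 1 + 3) / 4 == 2.0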
124
1
"""simple docstring""" from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class UpperCamelCase ( lowercase ): UpperCAmelCase : List[Any] = ["""image_processor""", """tokenizer"""] UpperCAmelCase : str = """BlipImageProcessor""" UpperCAmelCase : List[str] = """AutoTokenizer""" def __init__(self : str , _A : Any , _A : str) -> Union[str, Any]: __snake_case : Dict = False super().__init__(_A , _A) __snake_case : int = self.image_processor def __call__(self : Dict , _A : ImageInput = None , _A : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _A : bool = True , _A : Union[bool, str, PaddingStrategy] = False , _A : Union[bool, str, TruncationStrategy] = None , _A : Optional[int] = None , _A : int = 0 , _A : Optional[int] = None , _A : Optional[bool] = None , _A : bool = False , _A : bool = False , _A : bool = False , _A : bool = False , _A : bool = False , _A : bool = True , _A : Optional[Union[str, TensorType]] = None , **_A : int , ) -> BatchEncoding: if images is None and text is None: raise ValueError('You have to specify either images or text.') # Get only text if images is None: __snake_case : Optional[Any] = self.tokenizer __snake_case : Optional[int] = self.tokenizer( text=_A , add_special_tokens=_A , padding=_A , truncation=_A , max_length=_A , stride=_A , pad_to_multiple_of=_A , return_attention_mask=_A , return_overflowing_tokens=_A , return_special_tokens_mask=_A , return_offsets_mapping=_A , return_token_type_ids=_A , return_length=_A , verbose=_A , return_tensors=_A , **_A , ) return text_encoding # add pixel_values __snake_case : Dict = self.image_processor(_A , return_tensors=_A) if text is not None: __snake_case : List[str] = self.tokenizer( text=_A , add_special_tokens=_A , padding=_A , truncation=_A , max_length=_A , stride=_A , pad_to_multiple_of=_A , return_attention_mask=_A , return_overflowing_tokens=_A , return_special_tokens_mask=_A , return_offsets_mapping=_A , return_token_type_ids=_A , return_length=_A , verbose=_A , return_tensors=_A , **_A , ) else: __snake_case : Optional[int] = None if text_encoding is not None: encoding_image_processor.update(_A) return encoding_image_processor def _lowercase (self : Optional[Any] , *_A : Any , **_A : Tuple) -> str: return self.tokenizer.batch_decode(*_A , **_A) def _lowercase (self : Tuple , *_A : int , **_A : str) -> Tuple: return self.tokenizer.decode(*_A , **_A) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def _lowercase (self : str) -> List[Any]: __snake_case : str = self.tokenizer.model_input_names __snake_case : List[str] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
351
"""simple docstring""" from __future__ import annotations import copy import tempfile import unittest from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tensorflow_probability, require_tf, slow, ) from ..bert.test_modeling_bert import BertModelTester if is_tf_available(): from transformers import ( TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFFunnelBaseModel, TFFunnelModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, TFTapasForQuestionAnswering, ) from transformers.models.auto.modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase ( lowercase ): UpperCAmelCase : Optional[int] = """new-model""" if is_tf_available(): class UpperCamelCase ( lowercase ): UpperCAmelCase : List[str] = NewModelConfig @require_tf class UpperCamelCase ( unittest.TestCase ): @slow def _lowercase (self : List[str]) -> Dict: __snake_case : Any = 'bert-base-cased' __snake_case : Optional[Any] = AutoConfig.from_pretrained(_A) self.assertIsNotNone(_A) self.assertIsInstance(_A , _A) __snake_case : Union[str, Any] = TFAutoModel.from_pretrained(_A) self.assertIsNotNone(_A) self.assertIsInstance(_A , _A) @slow def _lowercase (self : List[Any]) -> str: __snake_case : Optional[int] = 'bert-base-cased' __snake_case : List[Any] = AutoConfig.from_pretrained(_A) self.assertIsNotNone(_A) self.assertIsInstance(_A , _A) __snake_case : Dict = TFAutoModelForPreTraining.from_pretrained(_A) self.assertIsNotNone(_A) self.assertIsInstance(_A , _A) @slow def _lowercase (self : Any) -> List[str]: for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : str = AutoConfig.from_pretrained(_A) self.assertIsNotNone(_A) self.assertIsInstance(_A , _A) __snake_case : List[str] = TFAutoModelForCausalLM.from_pretrained(_A) __snake_case , __snake_case : List[str] = TFAutoModelForCausalLM.from_pretrained(_A , output_loading_info=_A) self.assertIsNotNone(_A) self.assertIsInstance(_A , _A) @slow def _lowercase (self : Tuple) -> Dict: for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Union[str, Any] = AutoConfig.from_pretrained(_A) self.assertIsNotNone(_A) self.assertIsInstance(_A , _A) __snake_case : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(_A) self.assertIsNotNone(_A) self.assertIsInstance(_A , _A) @slow def _lowercase (self : Union[str, Any]) -> Optional[int]: for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Union[str, Any] = 
AutoConfig.from_pretrained(_A) self.assertIsNotNone(_A) self.assertIsInstance(_A , _A) __snake_case : Optional[Any] = TFAutoModelForMaskedLM.from_pretrained(_A) __snake_case , __snake_case : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(_A , output_loading_info=_A) self.assertIsNotNone(_A) self.assertIsInstance(_A , _A) @slow def _lowercase (self : str) -> Union[str, Any]: for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Dict = AutoConfig.from_pretrained(_A) self.assertIsNotNone(_A) self.assertIsInstance(_A , _A) __snake_case : str = TFAutoModelForSeqaSeqLM.from_pretrained(_A) __snake_case , __snake_case : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(_A , output_loading_info=_A) self.assertIsNotNone(_A) self.assertIsInstance(_A , _A) @slow def _lowercase (self : str) -> str: # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: __snake_case : Tuple = AutoConfig.from_pretrained(_A) self.assertIsNotNone(_A) self.assertIsInstance(_A , _A) __snake_case : Tuple = TFAutoModelForSequenceClassification.from_pretrained(_A) self.assertIsNotNone(_A) self.assertIsInstance(_A , _A) @slow def _lowercase (self : Optional[Any]) -> Optional[int]: # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: __snake_case : List[str] = AutoConfig.from_pretrained(_A) self.assertIsNotNone(_A) self.assertIsInstance(_A , _A) __snake_case : Any = TFAutoModelForQuestionAnswering.from_pretrained(_A) self.assertIsNotNone(_A) self.assertIsInstance(_A , _A) @slow @require_tensorflow_probability def _lowercase (self : List[Any]) -> List[str]: for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]: __snake_case : Optional[Any] = AutoConfig.from_pretrained(_A) self.assertIsNotNone(_A) self.assertIsInstance(_A , _A) __snake_case : int = TFAutoModelForTableQuestionAnswering.from_pretrained(_A) __snake_case , __snake_case : Dict = TFAutoModelForTableQuestionAnswering.from_pretrained( _A , output_loading_info=_A) self.assertIsNotNone(_A) self.assertIsInstance(_A , _A) def _lowercase (self : Optional[Any]) -> Optional[Any]: __snake_case : Optional[int] = TFAutoModelWithLMHead.from_pretrained(_A) self.assertIsInstance(_A , _A) self.assertEqual(model.num_parameters() , 1_44_10) self.assertEqual(model.num_parameters(only_trainable=_A) , 1_44_10) def _lowercase (self : Any) -> List[str]: __snake_case : List[Any] = TFAutoModelWithLMHead.from_pretrained(_A) self.assertIsInstance(_A , _A) self.assertEqual(model.num_parameters() , 1_44_10) self.assertEqual(model.num_parameters(only_trainable=_A) , 1_44_10) def _lowercase (self : Optional[Any]) -> str: # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel __snake_case : Optional[Any] = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny') self.assertIsInstance(_A , _A) __snake_case : int = copy.deepcopy(model.config) __snake_case : int = ['FunnelBaseModel'] __snake_case : int = TFAutoModel.from_config(_A) self.assertIsInstance(_A , _A) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(_A) __snake_case : List[Any] = TFAutoModel.from_pretrained(_A) self.assertIsInstance(_A , _A) def _lowercase (self : List[Any]) -> int: try: AutoConfig.register('new-model' , _A) __snake_case : int = [ TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSequenceClassification, TFAutoModelForTokenClassification, ] for 
auto_class in auto_classes: with self.subTest(auto_class.__name__): # Wrong config class will raise an error with self.assertRaises(_A): auto_class.register(_A , _A) auto_class.register(_A , _A) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_A): auto_class.register(_A , _A) # Now that the config is registered, it can be used as any other config with the auto-API __snake_case : Union[str, Any] = BertModelTester(self).get_config() __snake_case : Optional[int] = NewModelConfig(**tiny_config.to_dict()) __snake_case : List[str] = auto_class.from_config(_A) self.assertIsInstance(_A , _A) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(_A) __snake_case : Tuple = auto_class.from_pretrained(_A) self.assertIsInstance(_A , _A) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"] for mapping in ( TF_MODEL_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, ): if NewModelConfig in mapping._extra_content: del mapping._extra_content[NewModelConfig] def _lowercase (self : Optional[int]) -> Union[str, Any]: with self.assertRaisesRegex( _A , 'bert-base is not a local folder and is not a valid model identifier'): __snake_case : Any = TFAutoModel.from_pretrained('bert-base') def _lowercase (self : str) -> str: with self.assertRaisesRegex( _A , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'): __snake_case : Optional[Any] = TFAutoModel.from_pretrained(_A , revision='aaaaaa') def _lowercase (self : int) -> Any: with self.assertRaisesRegex( _A , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ): __snake_case : List[str] = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model') def _lowercase (self : Optional[Any]) -> Any: with self.assertRaisesRegex(_A , 'Use `from_pt=True` to load this model'): __snake_case : List[str] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only') def _lowercase (self : str) -> Any: # Make sure we have cached the model. __snake_case : str = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert') with RequestCounter() as counter: __snake_case : List[str] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert') self.assertEqual(counter.get_request_count , 0) self.assertEqual(counter.head_request_count , 1) self.assertEqual(counter.other_request_count , 0) # With a sharded checkpoint __snake_case : Optional[int] = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded') with RequestCounter() as counter: __snake_case : Any = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded') self.assertEqual(counter.get_request_count , 0) self.assertEqual(counter.head_request_count , 1) self.assertEqual(counter.other_request_count , 0)
95
0
'''simple docstring''' import json import os import shutil import warnings from argparse import ArgumentParser, Namespace from pathlib import Path from typing import List from ..utils import logging from . import BaseTransformersCLICommand try: from cookiecutter.main import cookiecutter lowerCamelCase : Any = True except ImportError: lowerCamelCase : Union[str, Any] = False lowerCamelCase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name def _lowerCAmelCase ( _UpperCamelCase : Namespace ) -> Any: """simple docstring""" return AddNewModelCommand(args.testing , args.testing_file , path=args.path ) class A__ ( A__ ): @staticmethod def A ( _a : ArgumentParser ) -> Dict: '''simple docstring''' _SCREAMING_SNAKE_CASE =parser.add_parser('add-new-model' ) add_new_model_parser.add_argument('--testing' , action='store_true' , help='If in testing mode.' ) add_new_model_parser.add_argument('--testing_file' , type=_a , help='Configuration file on which to run.' ) add_new_model_parser.add_argument( '--path' , type=_a , help='Path to cookiecutter. Should only be used for testing purposes.' ) add_new_model_parser.set_defaults(func=_a ) def __init__( self : Optional[Any] , _a : bool , _a : str , _a : Optional[Any]=None , *_a : List[Any] ) -> Union[str, Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =testing _SCREAMING_SNAKE_CASE =testing_file _SCREAMING_SNAKE_CASE =path def A ( self : str ) -> int: '''simple docstring''' warnings.warn( 'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. ' 'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality ' 'checks, you should use `transformers-cli add-new-model-like` instead.' ) if not _has_cookiecutter: raise ImportError( 'Model creation dependencies are required to use the `add_new_model` command. Install them by running ' 'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' ) # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory _SCREAMING_SNAKE_CASE =[directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]] if len(_a ) > 0: raise ValueError( 'Several directories starting with `cookiecutter-template-` in current working directory. ' 'Please clean your directory by removing all folders starting with `cookiecutter-template-` or ' 'change your working directory.' 
) _SCREAMING_SNAKE_CASE =( Path(_a ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent ) _SCREAMING_SNAKE_CASE =path_to_transformer_root / 'templates' / 'adding_a_new_model' # Execute cookiecutter if not self._testing: cookiecutter(str(_a ) ) else: with open(self._testing_file , 'r' ) as configuration_file: _SCREAMING_SNAKE_CASE =json.load(_a ) cookiecutter( str(path_to_cookiecutter if self._path is None else self._path ) , no_input=_a , extra_context=_a , ) _SCREAMING_SNAKE_CASE =[directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0] # Retrieve configuration with open(directory + '/configuration.json' , 'r' ) as configuration_file: _SCREAMING_SNAKE_CASE =json.load(_a ) _SCREAMING_SNAKE_CASE =configuration['lowercase_modelname'] _SCREAMING_SNAKE_CASE =configuration['generate_tensorflow_pytorch_and_flax'] os.remove(f"{directory}/configuration.json" ) _SCREAMING_SNAKE_CASE ='PyTorch' in generate_tensorflow_pytorch_and_flax _SCREAMING_SNAKE_CASE ='TensorFlow' in generate_tensorflow_pytorch_and_flax _SCREAMING_SNAKE_CASE ='Flax' in generate_tensorflow_pytorch_and_flax _SCREAMING_SNAKE_CASE =f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}" os.makedirs(_a , exist_ok=_a ) os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}" , exist_ok=_a ) # Tests require submodules as they have parent imports with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py" , 'w' ): pass shutil.move( f"{directory}/__init__.py" , f"{model_dir}/__init__.py" , ) shutil.move( f"{directory}/configuration_{lowercase_model_name}.py" , f"{model_dir}/configuration_{lowercase_model_name}.py" , ) def remove_copy_lines(_a : Any ): with open(_a , 'r' ) as f: _SCREAMING_SNAKE_CASE =f.readlines() with open(_a , 'w' ) as f: for line in lines: if "# Copied from transformers." 
not in line: f.write(_a ) if output_pytorch: if not self._testing: remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py" ) shutil.move( f"{directory}/modeling_{lowercase_model_name}.py" , f"{model_dir}/modeling_{lowercase_model_name}.py" , ) shutil.move( f"{directory}/test_modeling_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py" , ) else: os.remove(f"{directory}/modeling_{lowercase_model_name}.py" ) os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py" ) if output_tensorflow: if not self._testing: remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py" ) shutil.move( f"{directory}/modeling_tf_{lowercase_model_name}.py" , f"{model_dir}/modeling_tf_{lowercase_model_name}.py" , ) shutil.move( f"{directory}/test_modeling_tf_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py" , ) else: os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py" ) os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py" ) if output_flax: if not self._testing: remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py" ) shutil.move( f"{directory}/modeling_flax_{lowercase_model_name}.py" , f"{model_dir}/modeling_flax_{lowercase_model_name}.py" , ) shutil.move( f"{directory}/test_modeling_flax_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py" , ) else: os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py" ) os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py" ) shutil.move( f"{directory}/{lowercase_model_name}.md" , f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md" , ) shutil.move( f"{directory}/tokenization_{lowercase_model_name}.py" , f"{model_dir}/tokenization_{lowercase_model_name}.py" , ) shutil.move( f"{directory}/tokenization_fast_{lowercase_model_name}.py" , f"{model_dir}/tokenization_{lowercase_model_name}_fast.py" , ) from os import fdopen, remove from shutil import copymode, move from tempfile import mkstemp def replace(_a : str , _a : str , _a : List[str] ): # Create temp file _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =mkstemp() _SCREAMING_SNAKE_CASE =False with fdopen(_a , 'w' ) as new_file: with open(_a ) as old_file: for line in old_file: new_file.write(_a ) if line_to_copy_below in line: _SCREAMING_SNAKE_CASE =True for line_to_copy in lines_to_copy: new_file.write(_a ) if not line_found: raise ValueError(f"Line {line_to_copy_below} was not found in file." ) # Copy the file permissions from the old file to the new file copymode(_a , _a ) # Remove original file remove(_a ) # Move new file move(_a , _a ) def skip_units(_a : str ): return ( ("generating PyTorch" in line and not output_pytorch) or ("generating TensorFlow" in line and not output_tensorflow) or ("generating Flax" in line and not output_flax) ) def replace_in_files(_a : str ): with open(_a ) as datafile: _SCREAMING_SNAKE_CASE =[] _SCREAMING_SNAKE_CASE =False _SCREAMING_SNAKE_CASE =False for line in datafile: if "# To replace in: " in line and "##" not in line: _SCREAMING_SNAKE_CASE =line.split('"' )[1] _SCREAMING_SNAKE_CASE =skip_units(_a ) elif "# Below: " in line and "##" not in line: _SCREAMING_SNAKE_CASE =line.split('"' )[1] _SCREAMING_SNAKE_CASE =skip_units(_a ) elif "# End." 
in line and "##" not in line: if not skip_file and not skip_snippet: replace(_a , _a , _a ) _SCREAMING_SNAKE_CASE =[] elif "# Replace with" in line and "##" not in line: _SCREAMING_SNAKE_CASE =[] elif "##" not in line: lines_to_copy.append(_a ) remove(_a ) replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py" ) os.rmdir(_a )
47
from typing import Optional, Union

import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin


class EmbeddingNormalizer(ModelMixin, ConfigMixin):
    """Learned per-dimension mean/std normalization for embedding vectors."""

    @register_to_config
    def __init__(self, embedding_dim: int = 768) -> None:
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds: torch.Tensor) -> torch.Tensor:
        # Normalize to zero mean and unit standard deviation.
        return (embeds - self.mean) * 1.0 / self.std

    def unscale(self, embeds: torch.Tensor) -> torch.Tensor:
        # Invert the normalization applied by `scale`.
        return (embeds * self.std) + self.mean
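# A minimal roundtrip sketch for the class above; `EmbeddingNormalizer` is the
# cleaned-up name used in this snippet (the original identifier was mangled),
# and running it assumes the surrounding diffusers package layout is importable.
if __name__ == "__main__":
    import torch

    normalizer = EmbeddingNormalizer(embedding_dim=768)
    embeds = torch.randn(2, 768)
    roundtrip = normalizer.unscale(normalizer.scale(embeds))
    assert torch.allclose(roundtrip, embeds, atol=1e-6)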
47
1
from typing import Callable, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}


class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
358
import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class a ( UpperCAmelCase ): _lowercase = ["image_processor", "tokenizer"] _lowercase = "OwlViTImageProcessor" _lowercase = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__( self , A_=None , A_=None , **A_ ): '''simple docstring''' _UpperCAmelCase : List[str] = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , A_ , ) _UpperCAmelCase : Union[str, Any] = kwargs.pop("feature_extractor" ) _UpperCAmelCase : Any = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(A_ , A_ ) def __call__( self , A_=None , A_=None , A_=None , A_="max_length" , A_="np" , **A_ ): '''simple docstring''' if text is None and query_images is None and images is None: raise ValueError( "You have to specify at least one text or query image or image. All three cannot be none." ) if text is not None: if isinstance(A_ , A_ ) or (isinstance(A_ , A_ ) and not isinstance(text[0] , A_ )): _UpperCAmelCase : Optional[int] = [self.tokenizer(A_ , padding=A_ , return_tensors=A_ , **A_ )] elif isinstance(A_ , A_ ) and isinstance(text[0] , A_ ): _UpperCAmelCase : Optional[int] = [] # Maximum number of queries across batch _UpperCAmelCase : Optional[Any] = max([len(A_ ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(A_ ) != max_num_queries: _UpperCAmelCase : Optional[int] = t + [" "] * (max_num_queries - len(A_ )) _UpperCAmelCase : str = self.tokenizer(A_ , padding=A_ , return_tensors=A_ , **A_ ) encodings.append(A_ ) else: raise TypeError("Input text should be a string, a list of strings or a nested list of strings" ) if return_tensors == "np": _UpperCAmelCase : List[str] = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) _UpperCAmelCase : Tuple = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp _UpperCAmelCase : Optional[Any] = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) _UpperCAmelCase : str = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch _UpperCAmelCase : str = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 ) _UpperCAmelCase : Dict = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf _UpperCAmelCase : Union[str, Any] = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 ) _UpperCAmelCase : Optional[int] = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 ) else: raise ValueError("Target return tensor type could not be returned" ) _UpperCAmelCase : Optional[int] = BatchEncoding() _UpperCAmelCase : str = input_ids _UpperCAmelCase : Optional[Any] = attention_mask if query_images is not None: _UpperCAmelCase : int = BatchEncoding() _UpperCAmelCase : str = self.image_processor( A_ , return_tensors=A_ , **A_ ).pixel_values 
_UpperCAmelCase : Optional[Any] = query_pixel_values if images is not None: _UpperCAmelCase : int = self.image_processor(A_ , return_tensors=A_ , **A_ ) if text is not None and images is not None: _UpperCAmelCase : Optional[int] = image_features.pixel_values return encoding elif query_images is not None and images is not None: _UpperCAmelCase : Any = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**A_ ) , tensor_type=A_ ) def _UpperCAmelCase ( self , *A_ , **A_ ): '''simple docstring''' return self.image_processor.post_process(*A_ , **A_ ) def _UpperCAmelCase ( self , *A_ , **A_ ): '''simple docstring''' return self.image_processor.post_process_object_detection(*A_ , **A_ ) def _UpperCAmelCase ( self , *A_ , **A_ ): '''simple docstring''' return self.image_processor.post_process_image_guided_detection(*A_ , **A_ ) def _UpperCAmelCase ( self , *A_ , **A_ ): '''simple docstring''' return self.tokenizer.batch_decode(*A_ , **A_ ) def _UpperCAmelCase ( self , *A_ , **A_ ): '''simple docstring''' return self.tokenizer.decode(*A_ , **A_ ) @property def _UpperCAmelCase ( self ): '''simple docstring''' warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , A_ , ) return self.image_processor_class @property def _UpperCAmelCase ( self ): '''simple docstring''' warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , A_ , ) return self.image_processor
189
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # Accept the historical misspelling of this kwarg for backward compatibility.
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
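# A small illustrative check of the config above: a valid `rope_scaling` dict
# passes validation while an unsupported type raises. The class name follows
# the cleaned-up snippet, the concrete values are arbitrary, and running it
# assumes the surrounding transformers package layout is importable.
if __name__ == "__main__":
    config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
    assert config.rope_scaling["factor"] == 2.0

    try:
        OpenLlamaConfig(rope_scaling={"type": "quadratic", "factor": 2.0})
    except ValueError as err:
        print(f"rejected as expected: {err}")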
174
"""simple docstring""" import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import SchedulerMixin, SchedulerOutput class _A ( lowerCAmelCase , lowerCAmelCase ): snake_case__ : Tuple = 1 @register_to_config def __init__( self , __lowerCAmelCase = 1000 , __lowerCAmelCase = None ): """simple docstring""" self.set_timesteps(__lowerCAmelCase ) # standard deviation of the initial noise distribution lowercase = 1.0 # For now we only support F-PNDM, i.e. the runge-kutta method # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf # mainly at formula (9), (12), (13) and the Algorithm 2. lowercase = 4 # running values lowercase = [] def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = None ): """simple docstring""" lowercase = num_inference_steps lowercase = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1] lowercase = torch.cat([steps, torch.tensor([0.0] )] ) if self.config.trained_betas is not None: lowercase = torch.tensor(self.config.trained_betas , dtype=torch.floataa ) else: lowercase = torch.sin(steps * math.pi / 2 ) ** 2 lowercase = (1.0 - self.betas**2) ** 0.5 lowercase = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1] lowercase = timesteps.to(__lowerCAmelCase ) lowercase = [] def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = True , ): """simple docstring""" if self.num_inference_steps is None: raise ValueError( """Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" ) lowercase = (self.timesteps == timestep).nonzero().item() lowercase = timestep_index + 1 lowercase = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index] self.ets.append(__lowerCAmelCase ) if len(self.ets ) == 1: lowercase = self.ets[-1] elif len(self.ets ) == 2: lowercase = (3 * self.ets[-1] - self.ets[-2]) / 2 elif len(self.ets ) == 3: lowercase = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12 else: lowercase = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4]) lowercase = self._get_prev_sample(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=__lowerCAmelCase ) def A__ ( self , __lowerCAmelCase , *__lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" return sample def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" lowercase = self.alphas[timestep_index] lowercase = self.betas[timestep_index] lowercase = self.alphas[prev_timestep_index] lowercase = self.betas[prev_timestep_index] lowercase = (sample - sigma * ets) / max(__lowerCAmelCase , 1E-8 ) lowercase = next_alpha * pred + ets * next_sigma return prev_sample def __len__( self ): """simple docstring""" return self.config.num_train_timesteps
197
0
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

from accelerate.utils import ComputeEnvironment

from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file  # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment  # noqa: F401
from .sagemaker import get_sagemaker_input


description = (
    "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your "
    "training system. Should always be ran first on your machine"
)


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
48
import argparse
from pathlib import Path

from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration


def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token"],
        required=True,
        type=str,
        help="RAG model type: rag_sequence, rag_token",
    )
    parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
    parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
    parser.add_argument(
        "--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
    )
    parser.add_argument(
        "--generator_tokenizer_name_or_path",
        type=str,
        help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
    )
    parser.add_argument(
        "--question_encoder_tokenizer_name_or_path",
        type=str,
        help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
    )
    parser.add_argument(
        "--config_name_or_path",
        type=str,
        help=(
            "Identifier of the model config to use, if not provided, resolves to a base config for a given"
            " ``model_type``"
        ),
    )

    args = parser.parse_args()

    dest_dir = Path(args.dest)
    dest_dir.mkdir(exist_ok=True)

    consolidate(
        args.model_type,
        args.generator_name_or_path,
        args.question_encoder_name_or_path,
        dest_dir,
        args.config_name_or_path,
        args.generator_tokenizer_name_or_path,
        args.question_encoder_tokenizer_name_or_path,
    )
48
1
"""simple docstring""" import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE = logging.get_logger(__name__) SCREAMING_SNAKE_CASE = {"vocab_file": "spiece.model"} SCREAMING_SNAKE_CASE = { "vocab_file": { "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model", "google/bigbird-roberta-large": ( "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model" ), "google/bigbird-base-trivia-itc": ( "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model" ), } } SCREAMING_SNAKE_CASE = { "google/bigbird-roberta-base": 4096, "google/bigbird-roberta-large": 4096, "google/bigbird-base-trivia-itc": 4096, } class UpperCAmelCase_ ( A_ ): lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = ['''input_ids''', '''attention_mask'''] lowercase__ = [] def __init__( self : Union[str, Any] , snake_case_ : Dict , snake_case_ : List[str]="<unk>" , snake_case_ : Dict="<s>" , snake_case_ : List[str]="</s>" , snake_case_ : Tuple="<pad>" , snake_case_ : str="[SEP]" , snake_case_ : Any="[MASK]" , snake_case_ : Tuple="[CLS]" , snake_case_ : Optional[Dict[str, Any]] = None , **snake_case_ : Dict , ) -> None: '''simple docstring''' A__ = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else bos_token A__ = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else eos_token A__ = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else unk_token A__ = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else pad_token A__ = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else cls_token A__ = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it A__ = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token A__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , sep_token=snake_case_ , mask_token=snake_case_ , cls_token=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , **snake_case_ , ) A__ = vocab_file A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(snake_case_ ) @property def __magic_name__ ( self : Optional[Any] ) -> Dict: '''simple docstring''' return self.sp_model.get_piece_size() def __magic_name__ ( self : Dict ) -> Optional[Any]: '''simple docstring''' A__ = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Dict ) -> str: '''simple docstring''' A__ = self.__dict__.copy() A__ = None return state def __setstate__( self : List[Any] , snake_case_ : Optional[Any] ) -> Tuple: '''simple docstring''' A__ = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): A__ = {} A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __magic_name__ ( self : Dict , snake_case_ : str ) -> List[str]: '''simple docstring''' return self.sp_model.encode(snake_case_ , out_type=snake_case_ ) def __magic_name__ ( self : Dict , snake_case_ : Any ) -> str: '''simple docstring''' return self.sp_model.piece_to_id(snake_case_ ) def __magic_name__ ( self : Dict , snake_case_ : List[str] ) -> Union[str, Any]: '''simple docstring''' A__ = self.sp_model.IdToPiece(snake_case_ ) return token def __magic_name__ ( self : List[Any] , snake_case_ : Optional[Any] ) -> List[str]: '''simple docstring''' A__ = [] A__ = "" A__ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(snake_case_ ) + token A__ = True A__ = [] else: current_sub_tokens.append(snake_case_ ) A__ = False out_string += self.sp_model.decode(snake_case_ ) return out_string.strip() def __magic_name__ ( self : Tuple , snake_case_ : List[int] , snake_case_ : bool = False , snake_case_ : bool = None , snake_case_ : bool = True , **snake_case_ : Optional[int] , ) -> str: '''simple docstring''' A__ = kwargs.pop("use_source_tokenizer" , snake_case_ ) A__ = self.convert_ids_to_tokens(snake_case_ , skip_special_tokens=snake_case_ ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 A__ = [] A__ = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(snake_case_ ) ) A__ = [] sub_texts.append(snake_case_ ) else: current_sub_text.append(snake_case_ ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(snake_case_ ) ) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: A__ = re.sub(R" (\[(MASK|SEP)\])" , R"\1" , " ".join(snake_case_ ) ) else: A__ = "".join(snake_case_ ) A__ = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: A__ = self.clean_up_tokenization(snake_case_ ) return clean_text else: return text def __magic_name__ ( self : Union[str, Any] , snake_case_ : str , snake_case_ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(snake_case_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return A__ = os.path.join( snake_case_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , snake_case_ ) elif not os.path.isfile(self.vocab_file ): with open(snake_case_ , "wb" ) as fi: A__ = self.sp_model.serialized_model_proto() fi.write(snake_case_ ) return (out_vocab_file,) def __magic_name__ ( self : List[Any] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A__ = [self.cls_token_id] A__ = [self.sep_token_id] return cls + token_ids_a + sep + token_ids_a + sep def __magic_name__ ( self : List[str] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None , snake_case_ : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ ) if token_ids_a is None: return [1] + ([0] * len(snake_case_ )) + [1] return [1] + ([0] * len(snake_case_ )) + [1] + ([0] * len(snake_case_ )) + [1] def __magic_name__ ( self : Dict , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' A__ = [self.sep_token_id] A__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
247
"""simple docstring""" def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> int: if exponent == 1: return base if exponent % 2 == 0: A__ = _modexpt(lowercase_ , exponent // 2 , lowercase_ ) % modulo_value return (x * x) % modulo_value else: return (base * _modexpt(lowercase_ , exponent - 1 , lowercase_ )) % modulo_value def _SCREAMING_SNAKE_CASE ( lowercase_ = 17_77 , lowercase_ = 18_55 , lowercase_ = 8 ) -> int: A__ = base for _ in range(1 , lowercase_ ): A__ = _modexpt(lowercase_ , lowercase_ , 10**digits ) return result if __name__ == "__main__": print(f'{solution() = }')
247
1
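A quick sanity check for the modular-exponentiation sample above: this sketch assumes the de-obfuscated names _modexpt and solution used in the cleaned-up listing, and cross-checks against Python's built-in three-argument pow.

assert _modexpt(3, 20, 10**8) == pow(3, 20, 10**8)
assert _modexpt(1777, 1855, 10**8) == pow(1777, 1855, 10**8)
print(solution(base=3, height=2, digits=8))  # tower of height 2: 3**3 % 10**8 == 27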
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings

from .state import AcceleratorState, GradientState


warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")


class AcceleratedScheduler:
    """A wrapper around a learning-rate scheduler that only steps it when the wrapped optimizers actually stepped."""

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
327
from math import isqrt


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes strictly below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count the composites below max_number with exactly two (not necessarily distinct) prime factors."""
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count


if __name__ == "__main__":
    print(f"{solution() = }")
327
1
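As a small worked check of the two-pointer semiprime counter above (using the restored names calculate_prime_numbers and solution): the semiprimes below 30 are 4, 6, 9, 10, 14, 15, 21, 22, 25 and 26, ten in total.

assert calculate_prime_numbers(15) == [2, 3, 5, 7, 11, 13]
assert solution(30) == 10  # 4, 6, 9, 10, 14, 15, 21, 22, 25, 26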
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class lowercase ( unittest.TestCase ): def __init__( self , _a , _a=7 , _a=3 , _a=18 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=False , _a=True , _a=True , _a=[0.5, 0.5, 0.5] , _a=[0.5, 0.5, 0.5] , ) -> Dict: _A : str = parent _A : int = batch_size _A : Optional[int] = num_channels _A : List[Any] = image_size _A : int = min_resolution _A : Optional[int] = max_resolution _A : Any = do_resize _A : List[str] = size if size is not None else {"""height""": 18, """width""": 20} _A : Optional[int] = do_thumbnail _A : str = do_align_axis _A : List[Any] = do_pad _A : Optional[Any] = do_normalize _A : Tuple = image_mean _A : List[str] = image_std def a__ ( self ) -> Optional[int]: return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class lowercase ( UpperCamelCase__,unittest.TestCase ): _a = DonutImageProcessor if is_vision_available() else None def a__ ( self ) -> Optional[int]: _A : List[str] = DonutImageProcessingTester(self ) @property def a__ ( self ) -> List[Any]: return self.image_processor_tester.prepare_image_processor_dict() def a__ ( self ) -> Optional[Any]: _A : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_a , """do_resize""" ) ) self.assertTrue(hasattr(_a , """size""" ) ) self.assertTrue(hasattr(_a , """do_thumbnail""" ) ) self.assertTrue(hasattr(_a , """do_align_long_axis""" ) ) self.assertTrue(hasattr(_a , """do_pad""" ) ) self.assertTrue(hasattr(_a , """do_normalize""" ) ) self.assertTrue(hasattr(_a , """image_mean""" ) ) self.assertTrue(hasattr(_a , """image_std""" ) ) def a__ ( self ) -> List[Any]: _A : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 20} ) _A : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) # Previous config had dimensions in (width, height) order _A : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {"""height""": 84, """width""": 42} ) def a__ ( self ) -> Union[str, Any]: pass @is_flaky() def a__ ( self ) -> Optional[int]: # Initialize image_processing _A : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a ) for image in image_inputs: self.assertIsInstance(_a , Image.Image ) # Test not batched input _A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _A : Any = image_processing(_a , 
return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) @is_flaky() def a__ ( self ) -> Dict: # Initialize image_processing _A : str = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _A : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a ) for image in image_inputs: self.assertIsInstance(_a , np.ndarray ) # Test not batched input _A : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _A : List[str] = image_processing(_a , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) @is_flaky() def a__ ( self ) -> Optional[int]: # Initialize image_processing _A : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _A : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a ) for image in image_inputs: self.assertIsInstance(_a , torch.Tensor ) # Test not batched input _A : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _A : str = image_processing(_a , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , )
26
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for a single input sample."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the frequency response of a filter from its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the phase response of a filter from its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phases = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phases, -2 * pi))
    plt.show()
62
0
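The plotting helpers above only require an object exposing a process(sample) method; a minimal sketch of such a filter (a hypothetical identity filter, not part of the original sample) could look like this:

class IdentityFilter:
    """Pass-through filter: y[n] = x[n], i.e. a flat 0 dB frequency response."""

    def process(self, sample: float) -> float:
        return sample

# show_frequency_response(IdentityFilter(), 48000)  # would plot a flat line at 0 dB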
from __future__ import annotations

from collections import namedtuple
from dataclasses import dataclass


@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """Return the minimum number of moves needed so that every node of the tree holds exactly one coin."""
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves + right_distrib_moves + abs(coins_to_left) + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
138
import sys

from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append("importlib_metadata")

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
138
1
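A small worked example for the coin-distribution sample above (assuming the restored TreeNode/distribute_coins names): a root with 0 coins, a left child with 3 and a right child with 0 needs three moves in total (two coins passed up from the left child, then one passed down to the right child).

root = TreeNode(0, TreeNode(3), TreeNode(0))
assert distribute_coins(root) == 3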
"""simple docstring""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''', # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class SCREAMING_SNAKE_CASE__ ( lowercase ): """simple docstring""" a : Optional[int] ="blenderbot-small" a : str =["past_key_values"] a : List[str] ={"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self , snake_case__=50_265 , snake_case__=512 , snake_case__=8 , snake_case__=2_048 , snake_case__=16 , snake_case__=8 , snake_case__=2_048 , snake_case__=16 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=True , snake_case__=True , snake_case__="gelu" , snake_case__=512 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=1 , snake_case__=False , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=2 , **snake_case__ , ): """simple docstring""" lowerCAmelCase : Dict = vocab_size lowerCAmelCase : List[Any] = max_position_embeddings lowerCAmelCase : Union[str, Any] = d_model lowerCAmelCase : List[str] = encoder_ffn_dim lowerCAmelCase : Optional[int] = encoder_layers lowerCAmelCase : str = encoder_attention_heads lowerCAmelCase : int = decoder_ffn_dim lowerCAmelCase : int = decoder_layers lowerCAmelCase : int = decoder_attention_heads lowerCAmelCase : List[Any] = dropout lowerCAmelCase : int = attention_dropout lowerCAmelCase : List[str] = activation_dropout lowerCAmelCase : int = activation_function lowerCAmelCase : str = init_std lowerCAmelCase : Optional[int] = encoder_layerdrop lowerCAmelCase : Tuple = decoder_layerdrop lowerCAmelCase : Dict = use_cache lowerCAmelCase : Any = encoder_layers lowerCAmelCase : Optional[Any] = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , is_encoder_decoder=snake_case__ , decoder_start_token_id=snake_case__ , forced_eos_token_id=snake_case__ , **snake_case__ , ) class SCREAMING_SNAKE_CASE__ ( lowercase ): """simple docstring""" @property def lowercase__ ( self ): """simple docstring""" if self.task in ["default", "seq2seq-lm"]: lowerCAmelCase : List[str] = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: lowerCAmelCase : Optional[Any] = {0: "batch"} lowerCAmelCase : int = {0: "batch", 1: "past_decoder_sequence + sequence"} else: lowerCAmelCase : str = {0: "batch", 1: "decoder_sequence"} lowerCAmelCase : Any = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(snake_case__ , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. 
lowerCAmelCase : Tuple = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: lowerCAmelCase , lowerCAmelCase : Union[str, Any] = self.num_layers for i in range(snake_case__ ): lowerCAmelCase : List[Any] = {0: "batch", 2: "past_sequence + sequence"} lowerCAmelCase : Optional[int] = {0: "batch", 2: "past_sequence + sequence"} else: lowerCAmelCase : List[str] = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property def lowercase__ ( self ): """simple docstring""" if self.task in ["default", "seq2seq-lm"]: lowerCAmelCase : List[str] = super().outputs else: lowerCAmelCase : List[str] = super(snake_case__ , self ).outputs if self.use_past: lowerCAmelCase , lowerCAmelCase : Optional[int] = self.num_layers for i in range(snake_case__ ): lowerCAmelCase : Any = {0: "batch", 2: "past_sequence + sequence"} lowerCAmelCase : Tuple = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def lowercase__ ( self , snake_case__ , snake_case__ = -1 , snake_case__ = -1 , snake_case__ = False , snake_case__ = None , ): """simple docstring""" lowerCAmelCase : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # Generate decoder inputs lowerCAmelCase : str = seq_length if not self.use_past else 1 lowerCAmelCase : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) lowerCAmelCase : Any = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()} lowerCAmelCase : str = dict(**snake_case__ , **snake_case__ ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch lowerCAmelCase , lowerCAmelCase : Union[str, Any] = common_inputs["input_ids"].shape lowerCAmelCase : int = common_inputs["decoder_input_ids"].shape[1] lowerCAmelCase , lowerCAmelCase : str = self.num_attention_heads lowerCAmelCase : Dict = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCAmelCase : Dict = decoder_seq_length + 3 lowerCAmelCase : Union[str, Any] = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) lowerCAmelCase : Dict = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(snake_case__ , snake_case__ )] , dim=1 ) lowerCAmelCase : Tuple = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered lowerCAmelCase , lowerCAmelCase : str = self.num_layers lowerCAmelCase : Tuple = min(snake_case__ , snake_case__ ) lowerCAmelCase : Optional[int] = max(snake_case__ , snake_case__ ) - min_num_layers lowerCAmelCase : Any = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(snake_case__ ): common_inputs["past_key_values"].append( ( torch.zeros(snake_case__ ), torch.zeros(snake_case__ ), torch.zeros(snake_case__ ), torch.zeros(snake_case__ ), ) ) # TODO: test this. 
lowerCAmelCase : Any = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(snake_case__ , snake_case__ ): common_inputs["past_key_values"].append((torch.zeros(snake_case__ ), torch.zeros(snake_case__ )) ) return common_inputs def lowercase__ ( self , snake_case__ , snake_case__ = -1 , snake_case__ = -1 , snake_case__ = False , snake_case__ = None , ): """simple docstring""" lowerCAmelCase : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch lowerCAmelCase , lowerCAmelCase : Dict = common_inputs["input_ids"].shape # Not using the same length for past_key_values lowerCAmelCase : List[Any] = seqlen + 2 lowerCAmelCase , lowerCAmelCase : Optional[int] = self.num_layers lowerCAmelCase , lowerCAmelCase : Tuple = self.num_attention_heads lowerCAmelCase : Optional[int] = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCAmelCase : List[Any] = common_inputs["attention_mask"].dtype lowerCAmelCase : List[str] = torch.cat( [common_inputs["attention_mask"], torch.ones(snake_case__ , snake_case__ , dtype=snake_case__ )] , dim=1 ) lowerCAmelCase : Union[str, Any] = [ (torch.zeros(snake_case__ ), torch.zeros(snake_case__ )) for _ in range(snake_case__ ) ] return common_inputs def lowercase__ ( self , snake_case__ , snake_case__ = -1 , snake_case__ = -1 , snake_case__ = False , snake_case__ = None , ): """simple docstring""" lowerCAmelCase : Optional[int] = compute_effective_axis_dimension( snake_case__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowerCAmelCase : Any = tokenizer.num_special_tokens_to_add(snake_case__ ) lowerCAmelCase : str = compute_effective_axis_dimension( snake_case__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case__ ) # Generate dummy inputs according to compute batch and sequence lowerCAmelCase : List[Any] = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size lowerCAmelCase : Tuple = dict(tokenizer(snake_case__ , return_tensors=snake_case__ ) ) return common_inputs def lowercase__ ( self , snake_case__ , snake_case__ = -1 , snake_case__ = -1 , snake_case__ = False , snake_case__ = None , ): """simple docstring""" if self.task in ["default", "seq2seq-lm"]: lowerCAmelCase : List[str] = self._generate_dummy_inputs_for_default_and_seqaseq_lm( snake_case__ , batch_size=snake_case__ , seq_length=snake_case__ , is_pair=snake_case__ , framework=snake_case__ ) elif self.task == "causal-lm": lowerCAmelCase : Union[str, Any] = self._generate_dummy_inputs_for_causal_lm( snake_case__ , batch_size=snake_case__ , seq_length=snake_case__ , is_pair=snake_case__ , framework=snake_case__ ) else: lowerCAmelCase : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( snake_case__ , batch_size=snake_case__ , seq_length=snake_case__ , is_pair=snake_case__ , framework=snake_case__ ) return common_inputs def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): """simple docstring""" if self.task in ["default", "seq2seq-lm"]: lowerCAmelCase : Tuple = super()._flatten_past_key_values_(snake_case__ 
, snake_case__ , snake_case__ , snake_case__ ) else: lowerCAmelCase : Dict = super(snake_case__ , self )._flatten_past_key_values_( snake_case__ , snake_case__ , snake_case__ , snake_case__ )
108
"""simple docstring""" from __future__ import annotations from functools import lru_cache from math import ceil lowerCAmelCase__ = 100 lowerCAmelCase__ = set(range(3, NUM_PRIMES, 2)) primes.add(2) lowerCAmelCase__ = 42 for prime in range(3, ceil(NUM_PRIMES**0.5), 2): if prime not in primes: continue primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime))) @lru_cache(maxsize=1_0_0 ) def a__ ( SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if number_to_partition < 0: return set() elif number_to_partition == 0: return {1} lowerCAmelCase : set[int] = set() lowerCAmelCase : int lowerCAmelCase : int for prime in primes: if prime > number_to_partition: continue for sub in partition(number_to_partition - prime ): ret.add(sub * prime ) return ret def a__ ( SCREAMING_SNAKE_CASE : int = 5_0_0_0 ): '''simple docstring''' for number_to_partition in range(1 , SCREAMING_SNAKE_CASE ): if len(partition(SCREAMING_SNAKE_CASE ) ) > number_unique_partitions: return number_to_partition return None if __name__ == "__main__": print(F"{solution() = }")
108
1
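For the prime-partition sample above (restored names partition/solution), the classic check from Project Euler 77 is that 10 can be written as a sum of primes in exactly five ways, each with a distinct prime product:

assert partition(10) == {32, 36, 30, 25, 21}  # 2^5, 2*2*3*3, 2*3*5, 5*5, 3*7
assert len(partition(10)) == 5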
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Measure a single qubit (left in |0>) and return the histogram of results."""
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])

    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {single_qubit_measure(1, 1)}")
49
__A : List[Any] = [ 9_99, 8_00, 7_99, 6_00, 5_99, 5_00, 4_00, 3_99, 3_77, 3_55, 3_33, 3_11, 2_88, 2_66, 2_44, 2_22, 2_00, 1_99, 1_77, 1_55, 1_33, 1_11, 88, 66, 44, 22, 0, ] __A : int = [ 9_99, 9_76, 9_52, 9_28, 9_05, 8_82, 8_58, 8_57, 8_10, 7_62, 7_15, 7_14, 5_72, 4_29, 4_28, 2_86, 2_85, 2_38, 1_90, 1_43, 1_42, 1_18, 95, 71, 47, 24, 0, ] __A : Any = [ 9_99, 9_88, 9_77, 9_66, 9_55, 9_44, 9_33, 9_22, 9_11, 9_00, 8_99, 8_79, 8_59, 8_40, 8_20, 8_00, 7_99, 7_66, 7_33, 7_00, 6_99, 6_50, 6_00, 5_99, 5_00, 4_99, 4_00, 3_99, 3_50, 3_00, 2_99, 2_66, 2_33, 2_00, 1_99, 1_79, 1_59, 1_40, 1_20, 1_00, 99, 88, 77, 66, 55, 44, 33, 22, 11, 0, ] __A : Dict = [ 9_99, 9_95, 9_92, 9_89, 9_85, 9_81, 9_78, 9_75, 9_71, 9_67, 9_64, 9_61, 9_57, 9_56, 9_51, 9_47, 9_42, 9_37, 9_33, 9_28, 9_23, 9_19, 9_14, 9_13, 9_08, 9_03, 8_97, 8_92, 8_87, 8_81, 8_76, 8_71, 8_70, 8_64, 8_58, 8_52, 8_46, 8_40, 8_34, 8_28, 8_27, 8_20, 8_13, 8_06, 7_99, 7_92, 7_85, 7_84, 7_77, 7_70, 7_63, 7_56, 7_49, 7_42, 7_41, 7_33, 7_24, 7_16, 7_07, 6_99, 6_98, 6_88, 6_77, 6_66, 6_56, 6_55, 6_45, 6_34, 6_23, 6_13, 6_12, 5_98, 5_84, 5_70, 5_69, 5_55, 5_41, 5_27, 5_26, 5_05, 4_84, 4_83, 4_62, 4_40, 4_39, 3_96, 3_95, 3_52, 3_51, 3_08, 3_07, 2_64, 2_63, 2_20, 2_19, 1_76, 1_32, 88, 44, 0, ] __A : List[str] = [ 9_99, 9_97, 9_95, 9_92, 9_90, 9_88, 9_86, 9_84, 9_81, 9_79, 9_77, 9_75, 9_72, 9_70, 9_68, 9_66, 9_64, 9_61, 9_59, 9_57, 9_56, 9_54, 9_51, 9_49, 9_46, 9_44, 9_41, 9_39, 9_36, 9_34, 9_31, 9_29, 9_26, 9_24, 9_21, 9_19, 9_16, 9_14, 9_13, 9_10, 9_07, 9_05, 9_02, 8_99, 8_96, 8_93, 8_91, 8_88, 8_85, 8_82, 8_79, 8_77, 8_74, 8_71, 8_70, 8_67, 8_64, 8_61, 8_58, 8_55, 8_52, 8_49, 8_46, 8_43, 8_40, 8_37, 8_34, 8_31, 8_28, 8_27, 8_24, 8_21, 8_17, 8_14, 8_11, 8_08, 8_04, 8_01, 7_98, 7_95, 7_91, 7_88, 7_85, 7_84, 7_80, 7_77, 7_74, 7_70, 7_66, 7_63, 7_60, 7_56, 7_52, 7_49, 7_46, 7_42, 7_41, 7_37, 7_33, 7_30, 7_26, 7_22, 7_18, 7_14, 7_10, 7_07, 7_03, 6_99, 6_98, 6_94, 6_90, 6_85, 6_81, 6_77, 6_73, 6_69, 6_64, 6_60, 6_56, 6_55, 6_50, 6_46, 6_41, 6_36, 6_32, 6_27, 6_22, 6_18, 6_13, 6_12, 6_07, 6_02, 5_96, 5_91, 5_86, 5_80, 5_75, 5_70, 5_69, 5_63, 5_57, 5_51, 5_45, 5_39, 5_33, 5_27, 5_26, 5_19, 5_12, 5_05, 4_98, 4_91, 4_84, 4_83, 4_74, 4_66, 4_57, 4_49, 4_40, 4_39, 4_28, 4_18, 4_07, 3_96, 3_95, 3_81, 3_66, 3_52, 3_51, 3_30, 3_08, 3_07, 2_86, 2_64, 2_63, 2_42, 2_20, 2_19, 1_76, 1_75, 1_32, 1_31, 88, 44, 0, ] __A : List[str] = [ 9_99, 9_91, 9_82, 9_74, 9_66, 9_58, 9_50, 9_41, 9_33, 9_25, 9_16, 9_08, 9_00, 8_99, 8_74, 8_50, 8_25, 8_00, 7_99, 7_00, 6_00, 5_00, 4_00, 3_00, 2_00, 1_00, 0, ] __A : Dict = [ 9_99, 9_92, 9_85, 9_78, 9_71, 9_64, 9_57, 9_49, 9_42, 9_35, 9_28, 9_21, 9_14, 9_07, 9_00, 8_99, 8_79, 8_59, 8_40, 8_20, 8_00, 7_99, 7_66, 7_33, 7_00, 6_99, 6_50, 6_00, 5_99, 5_00, 4_99, 4_00, 3_99, 3_00, 2_99, 2_00, 1_99, 1_00, 99, 0, ] __A : str = [ 9_99, 9_96, 9_92, 9_89, 9_85, 9_82, 9_79, 9_75, 9_72, 9_68, 9_65, 9_61, 9_58, 9_55, 9_51, 9_48, 9_44, 9_41, 9_38, 9_34, 9_31, 9_27, 9_24, 9_20, 9_17, 9_14, 9_10, 9_07, 9_03, 9_00, 8_99, 8_91, 8_84, 8_76, 8_69, 8_61, 8_53, 8_46, 8_38, 8_30, 8_23, 8_15, 8_08, 8_00, 7_99, 7_88, 7_77, 7_66, 7_55, 7_44, 7_33, 7_22, 7_11, 7_00, 6_99, 6_88, 6_77, 6_66, 6_55, 6_44, 6_33, 6_22, 6_11, 6_00, 5_99, 5_85, 5_71, 5_57, 5_42, 5_28, 5_14, 5_00, 4_99, 4_85, 4_71, 4_57, 4_42, 4_28, 4_14, 4_00, 3_99, 3_79, 3_59, 3_40, 3_20, 3_00, 2_99, 2_79, 2_59, 2_40, 2_20, 2_00, 1_99, 1_66, 1_33, 1_00, 99, 66, 33, 0, ]
49
1
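A usage note for the measurement sample above (restored name single_qubit_measure): since no gates are applied, the qubit stays in |0> and every shot lands in the '0' bucket.

counts = single_qubit_measure(1, 1)
print(counts)  # expected: {'0': 1000}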
import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from typing import Any, ContextManager, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class a__ ( snake_case ): """simple docstring""" def __get__( self , lowercase , lowercase=None ) -> Optional[Any]: '''simple docstring''' if obj is None: return self if self.fget is None: raise AttributeError("unreadable attribute" ) A__ = "__cached_" + self.fget.__name__ A__ = getattr(lowercase , lowercase , lowercase ) if cached is None: A__ = self.fget(lowercase ) setattr(lowercase , lowercase , lowercase ) return cached def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[int] ) -> Any: '''simple docstring''' A__ = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(F'invalid truth value {val!r}' ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[str, Any] ) -> List[Any]: '''simple docstring''' if is_torch_fx_proxy(SCREAMING_SNAKE_CASE_ ): return True if is_torch_available(): import torch if isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ): return True if is_tf_available(): import tensorflow as tf if isinstance(SCREAMING_SNAKE_CASE_ , tf.Tensor ): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(SCREAMING_SNAKE_CASE_ , (jnp.ndarray, Tracer) ): return True return isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[Any] ) -> Tuple: '''simple docstring''' return isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int ) -> Optional[Any]: '''simple docstring''' return _is_numpy(SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Tuple ) -> Optional[Any]: '''simple docstring''' import torch return isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[Any] ) -> Any: '''simple docstring''' return False if not is_torch_available() else _is_torch(SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[Any] ) -> str: '''simple docstring''' import torch return isinstance(SCREAMING_SNAKE_CASE_ , torch.device ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[str, Any] ) -> Tuple: '''simple docstring''' return False if not is_torch_available() else _is_torch_device(SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[Any] ) -> Tuple: '''simple docstring''' import torch if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): if hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): A__ = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else: return False return isinstance(SCREAMING_SNAKE_CASE_ , torch.dtype ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[Any] ) -> Optional[Any]: '''simple docstring''' return False if not is_torch_available() else _is_torch_dtype(SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[str, Any] ) -> Optional[Any]: '''simple docstring''' import tensorflow as tf return isinstance(SCREAMING_SNAKE_CASE_ , tf.Tensor ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[Any] ) -> Union[str, Any]: '''simple docstring''' return False if not 
is_tf_available() else _is_tensorflow(SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str ) -> Union[str, Any]: '''simple docstring''' import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(SCREAMING_SNAKE_CASE_ , "is_symbolic_tensor" ): return tf.is_symbolic_tensor(SCREAMING_SNAKE_CASE_ ) return type(SCREAMING_SNAKE_CASE_ ) == tf.Tensor def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Dict ) -> int: '''simple docstring''' return False if not is_tf_available() else _is_tf_symbolic_tensor(SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] ) -> str: '''simple docstring''' import jax.numpy as jnp # noqa: F811 return isinstance(SCREAMING_SNAKE_CASE_ , jnp.ndarray ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] ) -> Any: '''simple docstring''' return False if not is_flax_available() else _is_jax(SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Tuple ) -> str: '''simple docstring''' if isinstance(SCREAMING_SNAKE_CASE_ , (dict, UserDict) ): return {k: to_py_obj(SCREAMING_SNAKE_CASE_ ) for k, v in obj.items()} elif isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ): return [to_py_obj(SCREAMING_SNAKE_CASE_ ) for o in obj] elif is_tf_tensor(SCREAMING_SNAKE_CASE_ ): return obj.numpy().tolist() elif is_torch_tensor(SCREAMING_SNAKE_CASE_ ): return obj.detach().cpu().tolist() elif is_jax_tensor(SCREAMING_SNAKE_CASE_ ): return np.asarray(SCREAMING_SNAKE_CASE_ ).tolist() elif isinstance(SCREAMING_SNAKE_CASE_ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays return obj.tolist() else: return obj def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Dict ) -> Union[str, Any]: '''simple docstring''' if isinstance(SCREAMING_SNAKE_CASE_ , (dict, UserDict) ): return {k: to_numpy(SCREAMING_SNAKE_CASE_ ) for k, v in obj.items()} elif isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ): return np.array(SCREAMING_SNAKE_CASE_ ) elif is_tf_tensor(SCREAMING_SNAKE_CASE_ ): return obj.numpy() elif is_torch_tensor(SCREAMING_SNAKE_CASE_ ): return obj.detach().cpu().numpy() elif is_jax_tensor(SCREAMING_SNAKE_CASE_ ): return np.asarray(SCREAMING_SNAKE_CASE_ ) else: return obj class a__ ( snake_case ): """simple docstring""" def UpperCamelCase ( self ) -> str: '''simple docstring''' A__ = fields(self ) # Safety and consistency checks if not len(lowercase ): raise ValueError(F'{self.__class__.__name__} has no fields.' ) if not all(field.default is None for field in class_fields[1:] ): raise ValueError(F'{self.__class__.__name__} should not have more than one required field.' ) A__ = getattr(self , class_fields[0].name ) A__ = all(getattr(self , field.name ) is None for field in class_fields[1:] ) if other_fields_are_none and not is_tensor(lowercase ): if isinstance(lowercase , lowercase ): A__ = first_field.items() A__ = True else: try: A__ = iter(lowercase ) A__ = True except TypeError: A__ = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for idx, element in enumerate(lowercase ): if ( not isinstance(lowercase , (list, tuple) ) or not len(lowercase ) == 2 or not isinstance(element[0] , lowercase ) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute A__ = first_field else: # If we have a mixed iterator, raise an error raise ValueError( F'Cannot set key/value for {element}. It needs to be a tuple (key, value).' 
) break setattr(self , element[0] , element[1] ) if element[1] is not None: A__ = element[1] elif first_field is not None: A__ = first_field else: for field in class_fields: A__ = getattr(self , field.name ) if v is not None: A__ = v def __delitem__( self , *lowercase , **lowercase ) -> int: '''simple docstring''' raise Exception(F'You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.' ) def UpperCamelCase ( self , *lowercase , **lowercase ) -> Optional[int]: '''simple docstring''' raise Exception(F'You cannot use ``setdefault`` on a {self.__class__.__name__} instance.' ) def UpperCamelCase ( self , *lowercase , **lowercase ) -> Dict: '''simple docstring''' raise Exception(F'You cannot use ``pop`` on a {self.__class__.__name__} instance.' ) def UpperCamelCase ( self , *lowercase , **lowercase ) -> List[Any]: '''simple docstring''' raise Exception(F'You cannot use ``update`` on a {self.__class__.__name__} instance.' ) def __getitem__( self , lowercase ) -> Optional[Any]: '''simple docstring''' if isinstance(lowercase , lowercase ): A__ = dict(self.items() ) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__( self , lowercase , lowercase ) -> str: '''simple docstring''' if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(lowercase , lowercase ) super().__setattr__(lowercase , lowercase ) def __setitem__( self , lowercase , lowercase ) -> str: '''simple docstring''' super().__setitem__(lowercase , lowercase ) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(lowercase , lowercase ) def UpperCamelCase ( self ) -> Tuple[Any]: '''simple docstring''' return tuple(self[k] for k in self.keys() ) class a__ ( snake_case , snake_case ): """simple docstring""" @classmethod def UpperCamelCase ( cls , lowercase ) -> Optional[int]: '''simple docstring''' raise ValueError( F'{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}' ) class a__ ( snake_case ): """simple docstring""" __lowerCamelCase = 'longest' __lowerCamelCase = 'max_length' __lowerCamelCase = 'do_not_pad' class a__ ( snake_case ): """simple docstring""" __lowerCamelCase = 'pt' __lowerCamelCase = 'tf' __lowerCamelCase = 'np' __lowerCamelCase = 'jax' class a__ : """simple docstring""" def __init__( self , lowercase ) -> Union[str, Any]: '''simple docstring''' A__ = context_managers A__ = ExitStack() def __enter__( self ) -> Union[str, Any]: '''simple docstring''' for context_manager in self.context_managers: self.stack.enter_context(lowercase ) def __exit__( self , *lowercase , **lowercase ) -> Union[str, Any]: '''simple docstring''' self.stack.__exit__(*lowercase , **lowercase ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[str, Any] ) -> List[Any]: '''simple docstring''' A__ = infer_framework(SCREAMING_SNAKE_CASE_ ) if framework == "tf": A__ = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": A__ = inspect.signature(model_class.forward ) # PyTorch models else: A__ = inspect.signature(model_class.__call__ ) # Flax models for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[str, Any] ) -> str: '''simple docstring''' A__ = model_class.__name__ A__ = infer_framework(SCREAMING_SNAKE_CASE_ ) if framework == "tf": A__ = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": A__ = 
inspect.signature(model_class.forward ) # PyTorch models else: A__ = inspect.signature(model_class.__call__ ) # Flax models if "QuestionAnswering" in model_name: return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")] else: return [p for p in signature.parameters if "label" in p] def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: MutableMapping , SCREAMING_SNAKE_CASE_: str = "" , SCREAMING_SNAKE_CASE_: str = "." ) -> Union[str, Any]: '''simple docstring''' def _flatten_dict(SCREAMING_SNAKE_CASE_: Union[str, Any] , SCREAMING_SNAKE_CASE_: str="" , SCREAMING_SNAKE_CASE_: Any="." ): for k, v in d.items(): A__ = str(SCREAMING_SNAKE_CASE_ ) + delimiter + str(SCREAMING_SNAKE_CASE_ ) if parent_key else k if v and isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): yield from flatten_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , delimiter=SCREAMING_SNAKE_CASE_ ).items() else: yield key, v return dict(_flatten_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) @contextmanager def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[str, Any] , SCREAMING_SNAKE_CASE_: bool = False ) -> Dict: '''simple docstring''' if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str , SCREAMING_SNAKE_CASE_: List[str]=None ) -> List[Any]: '''simple docstring''' if is_numpy_array(SCREAMING_SNAKE_CASE_ ): return np.transpose(SCREAMING_SNAKE_CASE_ , axes=SCREAMING_SNAKE_CASE_ ) elif is_torch_tensor(SCREAMING_SNAKE_CASE_ ): return array.T if axes is None else array.permute(*SCREAMING_SNAKE_CASE_ ) elif is_tf_tensor(SCREAMING_SNAKE_CASE_ ): import tensorflow as tf return tf.transpose(SCREAMING_SNAKE_CASE_ , perm=SCREAMING_SNAKE_CASE_ ) elif is_jax_tensor(SCREAMING_SNAKE_CASE_ ): return jnp.transpose(SCREAMING_SNAKE_CASE_ , axes=SCREAMING_SNAKE_CASE_ ) else: raise ValueError(F'Type not supported for transpose: {type(SCREAMING_SNAKE_CASE_ )}.' ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[Any] , SCREAMING_SNAKE_CASE_: Optional[Any] ) -> int: '''simple docstring''' if is_numpy_array(SCREAMING_SNAKE_CASE_ ): return np.reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif is_torch_tensor(SCREAMING_SNAKE_CASE_ ): return array.reshape(*SCREAMING_SNAKE_CASE_ ) elif is_tf_tensor(SCREAMING_SNAKE_CASE_ ): import tensorflow as tf return tf.reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif is_jax_tensor(SCREAMING_SNAKE_CASE_ ): return jnp.reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else: raise ValueError(F'Type not supported for reshape: {type(SCREAMING_SNAKE_CASE_ )}.' ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Tuple , SCREAMING_SNAKE_CASE_: List[str]=None ) -> str: '''simple docstring''' if is_numpy_array(SCREAMING_SNAKE_CASE_ ): return np.squeeze(SCREAMING_SNAKE_CASE_ , axis=SCREAMING_SNAKE_CASE_ ) elif is_torch_tensor(SCREAMING_SNAKE_CASE_ ): return array.squeeze() if axis is None else array.squeeze(dim=SCREAMING_SNAKE_CASE_ ) elif is_tf_tensor(SCREAMING_SNAKE_CASE_ ): import tensorflow as tf return tf.squeeze(SCREAMING_SNAKE_CASE_ , axis=SCREAMING_SNAKE_CASE_ ) elif is_jax_tensor(SCREAMING_SNAKE_CASE_ ): return jnp.squeeze(SCREAMING_SNAKE_CASE_ , axis=SCREAMING_SNAKE_CASE_ ) else: raise ValueError(F'Type not supported for squeeze: {type(SCREAMING_SNAKE_CASE_ )}.' 
) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] , SCREAMING_SNAKE_CASE_: Any ) -> Union[str, Any]: '''simple docstring''' if is_numpy_array(SCREAMING_SNAKE_CASE_ ): return np.expand_dims(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif is_torch_tensor(SCREAMING_SNAKE_CASE_ ): return array.unsqueeze(dim=SCREAMING_SNAKE_CASE_ ) elif is_tf_tensor(SCREAMING_SNAKE_CASE_ ): import tensorflow as tf return tf.expand_dims(SCREAMING_SNAKE_CASE_ , axis=SCREAMING_SNAKE_CASE_ ) elif is_jax_tensor(SCREAMING_SNAKE_CASE_ ): return jnp.expand_dims(SCREAMING_SNAKE_CASE_ , axis=SCREAMING_SNAKE_CASE_ ) else: raise ValueError(F'Type not supported for expand_dims: {type(SCREAMING_SNAKE_CASE_ )}.' ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str ) -> Tuple: '''simple docstring''' if is_numpy_array(SCREAMING_SNAKE_CASE_ ): return np.size(SCREAMING_SNAKE_CASE_ ) elif is_torch_tensor(SCREAMING_SNAKE_CASE_ ): return array.numel() elif is_tf_tensor(SCREAMING_SNAKE_CASE_ ): import tensorflow as tf return tf.size(SCREAMING_SNAKE_CASE_ ) elif is_jax_tensor(SCREAMING_SNAKE_CASE_ ): return array.size else: raise ValueError(F'Type not supported for expand_dims: {type(SCREAMING_SNAKE_CASE_ )}.' ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int , SCREAMING_SNAKE_CASE_: Optional[Any] ) -> Dict: '''simple docstring''' for key, value in auto_map.items(): if isinstance(SCREAMING_SNAKE_CASE_ , (tuple, list) ): A__ = [F'{repo_id}--{v}' if (v is not None and "--" not in v) else v for v in value] elif value is not None and "--" not in value: A__ = F'{repo_id}--{value}' return auto_map def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Any ) -> Union[str, Any]: '''simple docstring''' for base_class in inspect.getmro(SCREAMING_SNAKE_CASE_ ): A__ = base_class.__module__ A__ = base_class.__name__ if module.startswith("tensorflow" ) or module.startswith("keras" ) or name == "TFPreTrainedModel": return "tf" elif module.startswith("torch" ) or name == "PreTrainedModel": return "pt" elif module.startswith("flax" ) or module.startswith("jax" ) or name == "FlaxPreTrainedModel": return "flax" else: raise TypeError(F'Could not infer framework from class {model_class}.' )
68
# Function to print upper half of diamond (pyramid)
def floyd(n):
    """Print the upper half of a diamond of n rows."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    """Print the lower half of a diamond of n rows."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """Print a complete diamond pattern with 2*n rows."""
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- | |- |--| |\ /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))

    print("Good Bye...")
129
0
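For the diamond-printing sample above (restored names floyd/reverse_floyd/pretty_print), a small run makes the output shape concrete:

pretty_print(2)
# expected shape (upper half, then mirrored lower half):
#  *
# * *
# * *
#  *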
from __future__ import annotations import time import numpy as np lowerCAmelCase__ = [8, 5, 9, 7] lowerCAmelCase__ = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] lowerCAmelCase__ = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class a_ : '''simple docstring''' def __init__( self : str , lowercase__ : list[int] , lowercase__ : list[list[int]] , lowercase__ : list[list[int]] , ): '''simple docstring''' lowerCAmelCase__ = claim_vector lowerCAmelCase__ = allocated_resources_table lowerCAmelCase__ = maximum_claim_table def __snake_case ( self : Optional[int]): '''simple docstring''' return [ sum(p_item[i] for p_item in self.__allocated_resources_table) for i in range(len(self.__allocated_resources_table[0])) ] def __snake_case ( self : List[str]): '''simple docstring''' return np.array(self.__claim_vector) - np.array( self.__processes_resource_summation()) def __snake_case ( self : Dict): '''simple docstring''' return [ list(np.array(self.__maximum_claim_table[i]) - np.array(lowercase__)) for i, allocated_resource in enumerate(self.__allocated_resources_table) ] def __snake_case ( self : Dict): '''simple docstring''' return {self.__need().index(lowercase__): i for i in self.__need()} def __snake_case ( self : List[Any] , **lowercase__ : int): '''simple docstring''' lowerCAmelCase__ = self.__need() lowerCAmelCase__ = self.__allocated_resources_table lowerCAmelCase__ = self.__available_resources() lowerCAmelCase__ = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print('_' * 50 + '\n') while need_list: lowerCAmelCase__ = False for each_need in need_list: lowerCAmelCase__ = True for index, need in enumerate(lowercase__): if need > available_resources[index]: lowerCAmelCase__ = False break if execution: lowerCAmelCase__ = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: lowerCAmelCase__ = original_need_index print(F"""Process {process_number + 1} is executing.""") # remove the process run from stack need_list.remove(lowercase__) # update available/freed resources stack lowerCAmelCase__ = np.array(lowercase__) + np.array( alloc_resources_table[process_number]) print( 'Updated available resource stack for processes: ' + ' '.join([str(lowercase__) for x in available_resources])) break if safe: print('The process is in a safe state.\n') else: print('System in unsafe state. Aborting...\n') break def __snake_case ( self : List[str]): '''simple docstring''' print(' ' * 9 + 'Allocated Resource Table') for item in self.__allocated_resources_table: print( F"""P{self.__allocated_resources_table.index(lowercase__) + 1}""" + ' '.join(F"""{it:>8}""" for it in item) + '\n') print(' ' * 9 + 'System Resource Table') for item in self.__maximum_claim_table: print( F"""P{self.__maximum_claim_table.index(lowercase__) + 1}""" + ' '.join(F"""{it:>8}""" for it in item) + '\n') print( 'Current Usage by Active Processes: ' + ' '.join(str(lowercase__) for x in self.__claim_vector)) print( 'Initial Available Resources: ' + ' '.join(str(lowercase__) for x in self.__available_resources())) time.sleep(1) if __name__ == "__main__": import doctest doctest.testmod()
119
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    """Encode a bytes-like object to Base64."""
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str) -> bytes:
    """Decode a Base64-encoded string or bytes-like object back to bytes."""
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(data)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
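# A quick round-trip check (sketch, not in the original file): the pure-Python codec
# above should agree with the standard library's `base64` module.
import base64

sample = b"Hello, World!"
assert base64_encode(sample) == base64.b64encode(sample)
assert base64_decode(base64_encode(sample).decode("utf-8")) == sample
print(base64_encode(sample))  # b'SGVsbG8sIFdvcmxkIQ=='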
119
1
import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_tf


if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.eager import context
    from tensorflow.python.framework import ops

    from transformers import GradientAccumulator, create_optimizer


@require_tf
class OptimizationFTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)

    def testGradientAccumulatorDistributionStrategy(self):
        # Reset the eager context so the distribution strategy can be set up cleanly
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])
        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
138
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device


torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
95
0
from typing import Optional

from torch import nn

from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    """Wraps two `Transformer2DModel`s and mixes their outputs during inference."""

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
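# Usage sketch (illustrative): a pipeline can re-balance the two conditioning streams
# at inference time through the attributes exposed above, e.g.
#
#     model.mix_ratio = 0.7                           # weight transformer 1's output more
#     model.condition_lengths = [77, 257]             # text tokens, then image tokens
#     model.transformer_index_for_condition = [1, 0]  # which transformer sees which condition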
363
from typing import Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Supports dicts, lists of dicts, generators and datasets
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
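# Usage sketch (assumption: the default visual-question-answering checkpoint, e.g.
# "dandelin/vilt-b32-finetuned-vqa", can be downloaded from the Hub):
#
#     from transformers import pipeline
#
#     vqa = pipeline("visual-question-answering")
#     vqa(image="path/to/photo.jpg", question="What is on the table?", top_k=3)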
346
0
"""simple docstring""" def _snake_case ( lowercase__ : Dict ) -> List[Any]: '''simple docstring''' stooge(lowercase__ , 0 , len(lowercase__ ) - 1 ) return arr def _snake_case ( lowercase__ : List[Any] , lowercase__ : str , lowercase__ : Optional[Any] ) -> int: '''simple docstring''' if i >= h: return # If first element is smaller than the last then swap them if arr[i] > arr[h]: lowerCAmelCase_ , lowerCAmelCase_ :int = arr[h], arr[i] # If there are more than 2 elements in the array if h - i + 1 > 2: lowerCAmelCase_ :Any = (int)((h - i + 1) / 3 ) # Recursively sort first 2/3 elements stooge(lowercase__ , lowercase__ , (h - t) ) # Recursively sort last 2/3 elements stooge(lowercase__ , i + t , (lowercase__) ) # Recursively sort first 2/3 elements stooge(lowercase__ , lowercase__ , (h - t) ) if __name__ == "__main__": __UpperCAmelCase = input('Enter numbers separated by a comma:\n').strip() __UpperCAmelCase = [int(item) for item in user_input.split(',')] print(stooge_sort(unsorted))
84
from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) lowerCamelCase : Tuple =_symbol_database.Default() lowerCamelCase : List[str] =_descriptor_pool.Default().AddSerializedFile( b'''\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03''' ) lowerCamelCase : str =globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, '''sentencepiece_model_pb2''', _globals) if _descriptor._USE_C_DESCRIPTORS is False: lowerCamelCase : Optional[int] =None lowerCamelCase : Tuple =b'''H\003''' # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" lowerCamelCase : List[str] =45 lowerCamelCase : List[Any] =1581 lowerCamelCase : Optional[int] =1517 lowerCamelCase : Tuple =1570 lowerCamelCase : Dict =1584 lowerCamelCase : Optional[Any] =1793 lowerCamelCase : Dict =1795 lowerCamelCase : Any =1916 lowerCamelCase : Dict =1864 lowerCamelCase : Dict =1905 lowerCamelCase : Dict =1919 lowerCamelCase : Union[str, Any] =2429 lowerCamelCase : List[Any] =2208 lowerCamelCase : List[Any] =2418 lowerCamelCase : List[str] =2323 lowerCamelCase : Dict =2407 # @@protoc_insertion_point(module_scope)
189
0
import argparse import dataclasses import json import logging import os import shutil from typing import List, Optional import datasets from accelerate import Accelerator from datasets import load_dataset from finetuning import finetune from tqdm.auto import tqdm import transformers from transformers import AutoConfig, set_seed from transformers.trainer_utils import IntervalStrategy _SCREAMING_SNAKE_CASE = logging.getLogger(__name__) _SCREAMING_SNAKE_CASE = """pytorch_model.bin""" @dataclasses.dataclass class SCREAMING_SNAKE_CASE_ : __magic_name__: str = dataclasses.field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} ) __magic_name__: Optional[str] = dataclasses.field( default=snake_case_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , ) @dataclasses.dataclass class SCREAMING_SNAKE_CASE_ : __magic_name__: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} ) __magic_name__: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} ) __magic_name__: Optional[str] = dataclasses.field( default=snake_case_ , metadata={"help": "A csv or a json file containing the validation data."} ) __magic_name__: Optional[str] = dataclasses.field( default=snake_case_ , metadata={"help": "The name of the task to train on."} , ) __magic_name__: Optional[List[str]] = dataclasses.field( default=snake_case_ , metadata={"help": "The list of labels for the task."} ) @dataclasses.dataclass class SCREAMING_SNAKE_CASE_ : __magic_name__: str = dataclasses.field( metadata={"help": "The output directory where the model predictions and checkpoints will be written."} ) __magic_name__: Optional[str] = dataclasses.field( default="accuracy" , metadata={"help": "The evaluation metric used for the task."} ) __magic_name__: Optional[str] = dataclasses.field( default="no" , metadata={ "help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]" } , ) __magic_name__: Optional[int] = dataclasses.field( default=10 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , ) __magic_name__: Optional[float] = dataclasses.field( default=0.0 , metadata={ "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions." 
} , ) __magic_name__: Optional[bool] = dataclasses.field( default=snake_case_ , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , ) __magic_name__: Optional[bool] = dataclasses.field( default=snake_case_ , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , ) __magic_name__: Optional[bool] = dataclasses.field( default=snake_case_ , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , ) __magic_name__: Optional[float] = dataclasses.field( default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , ) __magic_name__: Optional[int] = dataclasses.field( default=100 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , ) __magic_name__: Optional[int] = dataclasses.field( default=snake_case_ , metadata={"help": "Random seed for initialization."} , ) def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a , __a , __a ): snake_case_ : Any = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 ) if args.do_filter_by_confidence: snake_case_ : Optional[int] = dataset.filter(lambda __a : example["probability"] > args.confidence_threshold ) if args.do_filter_by_val_performance: assert eval_result >= 0.0 and eval_result <= 1.0 snake_case_ : Optional[Any] = int(eval_result * len(__a ) ) print(__a ) snake_case_ : Optional[Any] = dataset.sort('probability' , reverse=__a ) snake_case_ : Dict = dataset.select(range(__a ) ) snake_case_ : Union[str, Any] = dataset.remove_columns(['label', 'probability'] ) snake_case_ : Optional[int] = dataset.rename_column('prediction' , 'label' ) snake_case_ : Any = dataset.map(lambda __a : {"label": idalabel[example["label"]]} ) snake_case_ : List[Any] = dataset.shuffle(seed=args.seed ) snake_case_ : List[Any] = os.path.join(__a , f"""train_pseudo.{args.data_file_extension}""" ) if args.data_file_extension == "csv": dataset.to_csv(__a , index=__a ) else: dataset.to_json(__a ) def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a , **__a ): snake_case_ : List[str] = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , ) logger.info(accelerator.state ) # Setup logging, we only want one process per machine to log things on the # screen. accelerator.is_local_main_process is only True for one process per # machine. 
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() snake_case_ : List[Any] = STModelArguments(model_name_or_path=__a ) snake_case_ : str = STDataArguments(train_file=__a , infer_file=__a ) snake_case_ : str = STTrainingArguments(output_dir=__a ) snake_case_ : List[Any] = argparse.Namespace() for arg_class in (model_args, data_args, training_args): for key, value in vars(__a ).items(): setattr(__a , __a , __a ) for key, value in kwargs.items(): if hasattr(__a , __a ): setattr(__a , __a , __a ) # Sanity checks snake_case_ : str = {} snake_case_ : Tuple = None # You need to provide the training data and the data to predict on assert args.train_file is not None assert args.infer_file is not None snake_case_ : int = args.train_file snake_case_ : Tuple = args.infer_file if args.evaluation_strategy != IntervalStrategy.NO.value: assert args.eval_file is not None snake_case_ : Union[str, Any] = args.eval_file for key in data_files: snake_case_ : Optional[Any] = data_files[key].split('.' )[-1] assert extension in ["csv", "json"], f"""`{key}_file` should be a csv or a json file.""" if args.data_file_extension is None: snake_case_ : Union[str, Any] = extension else: assert extension == args.data_file_extension, f"""`{key}_file` should be a {args.data_file_extension} file`.""" assert ( args.eval_metric in datasets.list_metrics() ), f"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.""" # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed ) logger.info('Creating the initial data directory for self-training...' 
) snake_case_ : List[Any] = f"""{args.output_dir}/self-train_iter-{{}}""".format snake_case_ : Optional[Any] = data_dir_format(0 ) if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir , exist_ok=__a ) os.makedirs(__a , exist_ok=__a ) accelerator.wait_for_everyone() snake_case_ : Union[str, Any] = None snake_case_ : List[str] = None snake_case_ : List[Any] = 0 snake_case_ : Dict = False # Show the progress bar snake_case_ : Union[str, Any] = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process ) # Self-train for iteration in range(0 , int(args.max_selftrain_iterations ) ): snake_case_ : Optional[int] = data_dir_format(__a ) assert os.path.exists(__a ) # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for # iteration > 0 snake_case_ : str = os.path.join(__a , 'stage-1' ) snake_case_ : Tuple = { 'accelerator': accelerator, 'model_name_or_path': args.model_name_or_path, 'cache_dir': args.cache_dir, 'do_train': True, 'train_file': data_files['train'] if iteration == 0 else data_files['train_pseudo'], 'do_eval': True if args.eval_file is not None else False, 'eval_file': data_files['eval'], 'do_predict': True, 'infer_file': data_files['infer'], 'task_name': args.task_name, 'label_list': args.label_list, 'output_dir': current_output_dir, 'eval_metric': args.eval_metric, 'evaluation_strategy': args.evaluation_strategy, 'early_stopping_patience': args.early_stopping_patience, 'early_stopping_threshold': args.early_stopping_threshold, 'seed': args.seed, } # Add additional training arguments for key, value in kwargs.items(): if key not in arguments_dict and not hasattr(__a , __a ): arguments_dict.update({key: value} ) snake_case_ : List[Any] = os.path.join(__a , 'best-checkpoint' , __a ) if os.path.exists(__a ): logger.info( 'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.' , __a , __a , ) else: logger.info('***** Running self-training: iteration: %d, stage: 1 *****' , __a ) finetune(**__a ) accelerator.wait_for_everyone() assert os.path.exists(__a ) logger.info('Self-training job completed: iteration: %d, stage: 1.' , __a ) if iteration > 0 and args.finetune_on_labeled_data: # Stage 2 (optional): fine-tuning on the original labeled data snake_case_ : str = os.path.join(__a , 'best-checkpoint' ) snake_case_ : str = os.path.join(__a , 'stage-2' ) # Update arguments_dict snake_case_ : Any = model_path snake_case_ : int = data_files['train'] snake_case_ : Dict = current_output_dir snake_case_ : Any = os.path.join(__a , 'best-checkpoint' , __a ) if os.path.exists(__a ): logger.info( 'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.' , __a , __a , ) else: logger.info('***** Running self-training: iteration: %d, stage: 2 *****' , __a ) finetune(**__a ) accelerator.wait_for_everyone() assert os.path.exists(__a ) logger.info('Self-training job completed: iteration: %d, stage: 2.' 
, __a ) snake_case_ : Optional[int] = iteration snake_case_ : Tuple = data_dir_format(iteration + 1 ) snake_case_ : int = AutoConfig.from_pretrained(os.path.join(__a , 'best-checkpoint' ) ) snake_case_ : int = config.idalabel snake_case_ : Optional[Any] = os.path.join(__a , 'eval_results_best-checkpoint.json' ) snake_case_ : Optional[Any] = os.path.join(__a , 'test_results_best-checkpoint.json' ) assert os.path.exists(__a ) with open(__a , 'r' ) as f: snake_case_ : int = float(json.load(__a )[args.eval_metric] ) snake_case_ : List[Any] = os.path.join(__a , 'infer_output_best-checkpoint.csv' ) assert os.path.exists(__a ) # Loading the dataset from local csv or json files. snake_case_ : Optional[int] = load_dataset(args.data_file_extension , data_files={'data': data_files['infer']} )['data'] snake_case_ : Union[str, Any] = load_dataset('csv' , data_files={'data': infer_output_file} )['data'] if accelerator.is_main_process: os.makedirs(__a , exist_ok=__a ) shutil.copy(__a , os.path.join(__a , f"""eval_results_iter-{iteration}.json""" ) ) if os.path.exists(__a ): shutil.copy(__a , os.path.join(__a , f"""test_results_iter-{iteration}.json""" ) ) create_pseudo_labeled_data(__a , __a , __a , __a , __a , __a ) accelerator.wait_for_everyone() snake_case_ : Optional[int] = os.path.join(__a , f"""train_pseudo.{args.data_file_extension}""" ) if args.evaluation_strategy != IntervalStrategy.NO.value: snake_case_ : int = eval_result if best_iteration is None: snake_case_ : Tuple = new_iteration snake_case_ : Union[str, Any] = new_eval_result else: if new_eval_result - best_eval_result > args.early_stopping_threshold: snake_case_ : List[str] = new_iteration snake_case_ : Optional[int] = new_eval_result snake_case_ : List[Any] = 0 else: if new_eval_result == best_eval_result: snake_case_ : Union[str, Any] = new_iteration snake_case_ : List[Any] = new_eval_result early_stopping_patience_counter += 1 if early_stopping_patience_counter >= args.early_stopping_patience: snake_case_ : Optional[Any] = True progress_bar.update(1 ) if should_training_stop: break if best_iteration is not None: # Save the best iteration logger.info('Best iteration: %d' , __a ) logger.info('Best evaluation result: %s = %f' , args.eval_metric , __a ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(__a , f"""eval_results_iter-{iteration}.json""" ) , os.path.join(__a , 'eval_results_best-iteration.json' ) , ) else: # Assume that the last iteration is the best logger.info('Best iteration: %d' , args.max_selftrain_iterations - 1 ) logger.info('Best evaluation result: %s = %f' , args.eval_metric , __a ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(__a , f"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) , os.path.join(__a , 'eval_results_best-iteration.json' ) , )
88
import argparse import json import math import os import time import traceback import zipfile from collections import Counter import requests def SCREAMING_SNAKE_CASE__ ( __a , __a=None ): snake_case_ : Optional[int] = None if token is not None: snake_case_ : List[str] = {'Accept': 'application/vnd.github+json', 'Authorization': f"""Bearer {token}"""} snake_case_ : Union[str, Any] = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100""" snake_case_ : Optional[int] = requests.get(__a , headers=__a ).json() snake_case_ : List[str] = {} try: job_links.update({job['name']: job['html_url'] for job in result['jobs']} ) snake_case_ : Dict = math.ceil((result['total_count'] - 1_00) / 1_00 ) for i in range(__a ): snake_case_ : Optional[Any] = requests.get(url + f"""&page={i + 2}""" , headers=__a ).json() job_links.update({job['name']: job['html_url'] for job in result['jobs']} ) return job_links except Exception: print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" ) return {} def SCREAMING_SNAKE_CASE__ ( __a , __a=None ): snake_case_ : Union[str, Any] = None if token is not None: snake_case_ : List[Any] = {'Accept': 'application/vnd.github+json', 'Authorization': f"""Bearer {token}"""} snake_case_ : Optional[Any] = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100""" snake_case_ : Union[str, Any] = requests.get(__a , headers=__a ).json() snake_case_ : Any = {} try: artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} ) snake_case_ : str = math.ceil((result['total_count'] - 1_00) / 1_00 ) for i in range(__a ): snake_case_ : int = requests.get(url + f"""&page={i + 2}""" , headers=__a ).json() artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} ) return artifacts except Exception: print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" ) return {} def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a ): snake_case_ : Dict = None if token is not None: snake_case_ : List[Any] = {'Accept': 'application/vnd.github+json', 'Authorization': f"""Bearer {token}"""} snake_case_ : Optional[int] = requests.get(__a , headers=__a , allow_redirects=__a ) snake_case_ : str = result.headers['Location'] snake_case_ : List[str] = requests.get(__a , allow_redirects=__a ) snake_case_ : Optional[Any] = os.path.join(__a , f"""{artifact_name}.zip""" ) with open(__a , 'wb' ) as fp: fp.write(response.content ) def SCREAMING_SNAKE_CASE__ ( __a , __a=None ): snake_case_ : Any = [] snake_case_ : Any = [] snake_case_ : Tuple = None with zipfile.ZipFile(__a ) as z: for filename in z.namelist(): if not os.path.isdir(__a ): # read the file if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]: with z.open(__a ) as f: for line in f: snake_case_ : Tuple = line.decode('UTF-8' ).strip() if filename == "failures_line.txt": try: # `error_line` is the place where `error` occurs snake_case_ : Tuple = line[: line.index(': ' )] snake_case_ : Union[str, Any] = line[line.index(': ' ) + len(': ' ) :] errors.append([error_line, error] ) except Exception: # skip un-related lines pass elif filename == "summary_short.txt" and line.startswith('FAILED ' ): # `test` is the test method that failed snake_case_ : Any = line[len('FAILED ' ) :] failed_tests.append(__a ) elif filename == "job_name.txt": snake_case_ : Union[str, Any] = line if len(__a ) != len(__a ): raise 
ValueError( f"""`errors` and `failed_tests` should have the same number of elements. Got {len(__a )} for `errors` """ f"""and {len(__a )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some""" ' problem.' ) snake_case_ : List[str] = None if job_name and job_links: snake_case_ : Union[str, Any] = job_links.get(__a , __a ) # A list with elements of the form (line of error, error, failed test) snake_case_ : Optional[Any] = [x + [y] + [job_link] for x, y in zip(__a , __a )] return result def SCREAMING_SNAKE_CASE__ ( __a , __a=None ): snake_case_ : Any = [] snake_case_ : Any = [os.path.join(__a , __a ) for p in os.listdir(__a ) if p.endswith('.zip' )] for p in paths: errors.extend(get_errors_from_single_artifact(__a , job_links=__a ) ) return errors def SCREAMING_SNAKE_CASE__ ( __a , __a=None ): snake_case_ : Optional[int] = Counter() counter.update([x[1] for x in logs] ) snake_case_ : str = counter.most_common() snake_case_ : Tuple = {} for error, count in counts: if error_filter is None or error not in error_filter: snake_case_ : int = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]} snake_case_ : int = dict(sorted(r.items() , key=lambda __a : item[1]["count"] , reverse=__a ) ) return r def SCREAMING_SNAKE_CASE__ ( __a ): snake_case_ : Tuple = test.split('::' )[0] if test.startswith('tests/models/' ): snake_case_ : List[str] = test.split('/' )[2] else: snake_case_ : Union[str, Any] = None return test def SCREAMING_SNAKE_CASE__ ( __a , __a=None ): snake_case_ : Optional[int] = [(x[0], x[1], get_model(x[2] )) for x in logs] snake_case_ : str = [x for x in logs if x[2] is not None] snake_case_ : int = {x[2] for x in logs} snake_case_ : Dict = {} for test in tests: snake_case_ : List[str] = Counter() # count by errors in `test` counter.update([x[1] for x in logs if x[2] == test] ) snake_case_ : Any = counter.most_common() snake_case_ : str = {error: count for error, count in counts if (error_filter is None or error not in error_filter)} snake_case_ : Tuple = sum(error_counts.values() ) if n_errors > 0: snake_case_ : List[Any] = {'count': n_errors, 'errors': error_counts} snake_case_ : int = dict(sorted(r.items() , key=lambda __a : item[1]["count"] , reverse=__a ) ) return r def SCREAMING_SNAKE_CASE__ ( __a ): snake_case_ : Optional[Any] = '| no. | error | status |' snake_case_ : str = '|-:|:-|:-|' snake_case_ : Tuple = [header, sep] for error in reduced_by_error: snake_case_ : Dict = reduced_by_error[error]['count'] snake_case_ : List[str] = f"""| {count} | {error[:1_00]} | |""" lines.append(__a ) return "\n".join(__a ) def SCREAMING_SNAKE_CASE__ ( __a ): snake_case_ : Optional[Any] = '| model | no. 
of errors | major error | count |' snake_case_ : Union[str, Any] = '|-:|-:|-:|-:|' snake_case_ : Optional[int] = [header, sep] for model in reduced_by_model: snake_case_ : Any = reduced_by_model[model]['count'] snake_case_ ,snake_case_ : Dict = list(reduced_by_model[model]['errors'].items() )[0] snake_case_ : Any = f"""| {model} | {count} | {error[:60]} | {_count} |""" lines.append(__a ) return "\n".join(__a ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() # Required parameters parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""") parser.add_argument( """--output_dir""", type=str, required=True, help="""Where to store the downloaded artifacts and other result files.""", ) parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""") _SCREAMING_SNAKE_CASE = parser.parse_args() os.makedirs(args.output_dir, exist_ok=True) _SCREAMING_SNAKE_CASE = get_job_links(args.workflow_run_id, token=args.token) _SCREAMING_SNAKE_CASE = {} # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee. # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`. if _job_links: for k, v in _job_links.items(): # This is how GitHub actions combine job names. if " / " in k: _SCREAMING_SNAKE_CASE = k.find(""" / """) _SCREAMING_SNAKE_CASE = k[index + len(""" / """) :] _SCREAMING_SNAKE_CASE = v with open(os.path.join(args.output_dir, """job_links.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(job_links, fp, ensure_ascii=False, indent=4) _SCREAMING_SNAKE_CASE = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) for idx, (name, url) in enumerate(artifacts.items()): download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) _SCREAMING_SNAKE_CASE = get_all_errors(args.output_dir, job_links=job_links) # `e[1]` is the error _SCREAMING_SNAKE_CASE = Counter() counter.update([e[1] for e in errors]) # print the top 30 most common test errors _SCREAMING_SNAKE_CASE = counter.most_common(30) for item in most_common: print(item) with open(os.path.join(args.output_dir, """errors.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(errors, fp, ensure_ascii=False, indent=4) _SCREAMING_SNAKE_CASE = reduce_by_error(errors) _SCREAMING_SNAKE_CASE = reduce_by_model(errors) _SCREAMING_SNAKE_CASE = make_github_table(reduced_by_error) _SCREAMING_SNAKE_CASE = make_github_table_per_model(reduced_by_model) with open(os.path.join(args.output_dir, """reduced_by_error.txt"""), """w""", encoding="""UTF-8""") as fp: fp.write(sa) with open(os.path.join(args.output_dir, """reduced_by_model.txt"""), """w""", encoding="""UTF-8""") as fp: fp.write(sa)
88
1
import os
import sys
import tempfile

import torch

from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment


def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    """Launch a training function from a notebook, on CPU, GPU(s) or TPU."""
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )
            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )

            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.01", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")

                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)


def debug_launcher(function, args=(), num_processes=2):
    """Launch a training function using several processes on CPU for debugging purposes."""
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.01",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
48
import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Tuple = { 'b0': efficientnet.EfficientNetBa, 'b1': efficientnet.EfficientNetBa, 'b2': efficientnet.EfficientNetBa, 'b3': efficientnet.EfficientNetBa, 'b4': efficientnet.EfficientNetBa, 'b5': efficientnet.EfficientNetBa, 'b6': efficientnet.EfficientNetBa, 'b7': efficientnet.EfficientNetBa, } SCREAMING_SNAKE_CASE__ : Any = { 'b0': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.0, 'image_size': 224, 'dropout_rate': 0.2, 'dw_padding': [], }, 'b1': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.1, 'image_size': 240, 'dropout_rate': 0.2, 'dw_padding': [16], }, 'b2': { 'hidden_dim': 1408, 'width_coef': 1.1, 'depth_coef': 1.2, 'image_size': 260, 'dropout_rate': 0.3, 'dw_padding': [5, 8, 16], }, 'b3': { 'hidden_dim': 1536, 'width_coef': 1.2, 'depth_coef': 1.4, 'image_size': 300, 'dropout_rate': 0.3, 'dw_padding': [5, 18], }, 'b4': { 'hidden_dim': 1792, 'width_coef': 1.4, 'depth_coef': 1.8, 'image_size': 380, 'dropout_rate': 0.4, 'dw_padding': [6], }, 'b5': { 'hidden_dim': 2048, 'width_coef': 1.6, 'depth_coef': 2.2, 'image_size': 456, 'dropout_rate': 0.4, 'dw_padding': [13, 27], }, 'b6': { 'hidden_dim': 2304, 'width_coef': 1.8, 'depth_coef': 2.6, 'image_size': 528, 'dropout_rate': 0.5, 'dw_padding': [31], }, 'b7': { 'hidden_dim': 2560, 'width_coef': 2.0, 'depth_coef': 3.1, 'image_size': 600, 'dropout_rate': 0.5, 'dw_padding': [18], }, } def A ( _SCREAMING_SNAKE_CASE ) -> str: lowerCamelCase : int = EfficientNetConfig() lowerCamelCase : List[str] = CONFIG_MAP[model_name]["hidden_dim"] lowerCamelCase : List[str] = CONFIG_MAP[model_name]["width_coef"] lowerCamelCase : Any = CONFIG_MAP[model_name]["depth_coef"] lowerCamelCase : Union[str, Any] = CONFIG_MAP[model_name]["image_size"] lowerCamelCase : Optional[int] = CONFIG_MAP[model_name]["dropout_rate"] lowerCamelCase : str = CONFIG_MAP[model_name]["dw_padding"] lowerCamelCase : Tuple = "huggingface/label-files" lowerCamelCase : List[str] = "imagenet-1k-id2label.json" lowerCamelCase : Any = 1000 lowerCamelCase : Any = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,repo_type="dataset" ) ,"r" ) ) lowerCamelCase : List[str] = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowerCamelCase : Tuple = idalabel lowerCamelCase : Any = {v: k for k, v in idalabel.items()} return config def A ( ) -> int: lowerCamelCase : str = "http://images.cocodataset.org/val2017/000000039769.jpg" lowerCamelCase : Tuple = Image.open(requests.get(_SCREAMING_SNAKE_CASE ,stream=_SCREAMING_SNAKE_CASE ).raw ) return im def A ( _SCREAMING_SNAKE_CASE ) -> str: lowerCamelCase : List[Any] = CONFIG_MAP[model_name]["image_size"] lowerCamelCase : str = EfficientNetImageProcessor( size={"height": size, "width": size} ,image_mean=[0.485, 0.456, 0.406] ,image_std=[0.47853944, 0.4732864, 0.47434163] ,do_center_crop=_SCREAMING_SNAKE_CASE ,) return preprocessor def A ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: lowerCamelCase : Any = [v.split("_" )[0].split("block" )[1] for v in 
original_param_names if v.startswith("block" )] lowerCamelCase : Any = sorted(set(_SCREAMING_SNAKE_CASE ) ) lowerCamelCase : Dict = len(_SCREAMING_SNAKE_CASE ) lowerCamelCase : List[Any] = {b: str(_SCREAMING_SNAKE_CASE ) for b, i in zip(_SCREAMING_SNAKE_CASE ,range(_SCREAMING_SNAKE_CASE ) )} lowerCamelCase : List[Any] = [] rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") ) rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") ) rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") ) rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") ) rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") ) for b in block_names: lowerCamelCase : Dict = block_name_mapping[b] rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") ) rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") ) rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") ) rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") ) rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") ) lowerCamelCase : Optional[int] = {} for item in rename_keys: if item[0] in original_param_names: lowerCamelCase : 
List[str] = "efficientnet." + item[1] lowerCamelCase : int = "classifier.weight" lowerCamelCase : Union[str, Any] = "classifier.bias" return key_mapping def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Dict: for key, value in tf_params.items(): if "normalization" in key: continue lowerCamelCase : Tuple = key_mapping[key] if "_conv" in key and "kernel" in key: lowerCamelCase : List[Any] = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(3 ,2 ,0 ,1 ) elif "depthwise_kernel" in key: lowerCamelCase : int = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(2 ,3 ,0 ,1 ) elif "kernel" in key: lowerCamelCase : List[str] = torch.from_numpy(np.transpose(_SCREAMING_SNAKE_CASE ) ) else: lowerCamelCase : Optional[Any] = torch.from_numpy(_SCREAMING_SNAKE_CASE ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(_SCREAMING_SNAKE_CASE ) @torch.no_grad() def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[int]: lowerCamelCase : Optional[int] = model_classes[model_name]( include_top=_SCREAMING_SNAKE_CASE ,weights="imagenet" ,input_tensor=_SCREAMING_SNAKE_CASE ,input_shape=_SCREAMING_SNAKE_CASE ,pooling=_SCREAMING_SNAKE_CASE ,classes=1000 ,classifier_activation="softmax" ,) lowerCamelCase : List[Any] = original_model.trainable_variables lowerCamelCase : Tuple = original_model.non_trainable_variables lowerCamelCase : Union[str, Any] = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: lowerCamelCase : List[str] = param.numpy() lowerCamelCase : int = list(tf_params.keys() ) # Load HuggingFace model lowerCamelCase : Union[str, Any] = get_efficientnet_config(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Optional[int] = EfficientNetForImageClassification(_SCREAMING_SNAKE_CASE ).eval() lowerCamelCase : Tuple = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("Converting parameters..." ) lowerCamelCase : Union[str, Any] = rename_keys(_SCREAMING_SNAKE_CASE ) replace_params(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # Initialize preprocessor and preprocess input image lowerCamelCase : int = convert_image_processor(_SCREAMING_SNAKE_CASE ) lowerCamelCase : int = preprocessor(images=prepare_img() ,return_tensors="pt" ) # HF model inference hf_model.eval() with torch.no_grad(): lowerCamelCase : Optional[Any] = hf_model(**_SCREAMING_SNAKE_CASE ) lowerCamelCase : str = outputs.logits.detach().numpy() # Original model inference lowerCamelCase : Optional[Any] = False lowerCamelCase : Any = CONFIG_MAP[model_name]["image_size"] lowerCamelCase : Optional[int] = prepare_img().resize((image_size, image_size) ,resample=PIL.Image.NEAREST ) lowerCamelCase : Union[str, Any] = image.img_to_array(_SCREAMING_SNAKE_CASE ) lowerCamelCase : str = np.expand_dims(_SCREAMING_SNAKE_CASE ,axis=0 ) lowerCamelCase : Dict = original_model.predict(_SCREAMING_SNAKE_CASE ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,atol=1e-3 ), "The predicted logits are not the same." print("Model outputs match!" 
) if save_model: # Create folder to save model if not os.path.isdir(_SCREAMING_SNAKE_CASE ): os.mkdir(_SCREAMING_SNAKE_CASE ) # Save converted model and image processor hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) preprocessor.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: # Push model and image processor to hub print(f'''Pushing converted {model_name} to the hub...''' ) lowerCamelCase : int = f'''efficientnet-{model_name}''' preprocessor.push_to_hub(_SCREAMING_SNAKE_CASE ) hf_model.push_to_hub(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='b0', type=str, help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].', ) parser.add_argument( '--pytorch_dump_folder_path', default='hf_model', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--save_model', action='store_true', help='Save model to local') parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') SCREAMING_SNAKE_CASE__ : Tuple = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
48
1
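The conversion script in the row above permutes TensorFlow kernels into PyTorch layout before copying them into the state dict. A minimal standalone sketch of that axis reordering, under the assumption that kernels arrive as NumPy arrays (function names here are illustrative, not the script's own helpers):

# Sketch of the TF -> PyTorch kernel layout conversion used above.
# TF stores conv kernels as (H, W, in, out); PyTorch expects (out, in, H, W).
# Depthwise kernels are stored as (H, W, channels, multiplier) and map to
# (channels, multiplier, H, W).
import numpy as np
import torch

def tf_conv_kernel_to_torch(kernel: np.ndarray) -> torch.Tensor:
    return torch.from_numpy(kernel).permute(3, 2, 0, 1)

def tf_depthwise_kernel_to_torch(kernel: np.ndarray) -> torch.Tensor:
    return torch.from_numpy(kernel).permute(2, 3, 0, 1)

# Usage: a 3x3 conv with 4 input channels and 8 output channels.
tf_kernel = np.random.randn(3, 3, 4, 8).astype(np.float32)
pt_kernel = tf_conv_kernel_to_torch(tf_kernel)
assert pt_kernel.shape == (8, 4, 3, 3)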
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Recursively look for ``key`` from both ends of ``list_data``.

    Returns the index of ``key``, or -1 if it is absent.

    >>> search([1, 3, 5, 7, 9], 9)
    4
    >>> search([1, 3, 5, 7, 9], 4)
    -1
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
354
import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 _SCREAMING_SNAKE_CASE : Tuple = data_utils.TransfoXLTokenizer _SCREAMING_SNAKE_CASE : Dict = data_utils.TransfoXLCorpus _SCREAMING_SNAKE_CASE : Union[str, Any] = data_utils _SCREAMING_SNAKE_CASE : Any = data_utils def UpperCAmelCase_ ( _A , _A , _A , _A ): '''simple docstring''' if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(_A , '''rb''' ) as fp: SCREAMING_SNAKE_CASE__ = pickle.load(_A , encoding='''latin1''' ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) SCREAMING_SNAKE_CASE__ = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''pretrained_vocab_file'''] print(F'''Save vocabulary to {pytorch_vocab_dump_path}''' ) SCREAMING_SNAKE_CASE__ = corpus.vocab.__dict__ torch.save(_A , _A ) SCREAMING_SNAKE_CASE__ = corpus.__dict__ corpus_dict_no_vocab.pop('''vocab''' , _A ) SCREAMING_SNAKE_CASE__ = pytorch_dump_folder_path + '''/''' + CORPUS_NAME print(F'''Save dataset to {pytorch_dataset_dump_path}''' ) torch.save(_A , _A ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model SCREAMING_SNAKE_CASE__ = os.path.abspath(_A ) SCREAMING_SNAKE_CASE__ = os.path.abspath(_A ) print(F'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' ) # Initialise PyTorch model if transfo_xl_config_file == "": SCREAMING_SNAKE_CASE__ = TransfoXLConfig() else: SCREAMING_SNAKE_CASE__ = TransfoXLConfig.from_json_file(_A ) print(F'''Building PyTorch model from configuration: {config}''' ) SCREAMING_SNAKE_CASE__ = TransfoXLLMHeadModel(_A ) SCREAMING_SNAKE_CASE__ = load_tf_weights_in_transfo_xl(_A , _A , _A ) # Save pytorch-model SCREAMING_SNAKE_CASE__ = os.path.join(_A , _A ) SCREAMING_SNAKE_CASE__ = os.path.join(_A , _A ) print(F'''Save PyTorch model to {os.path.abspath(_A )}''' ) torch.save(model.state_dict() , _A ) print(F'''Save configuration file to {os.path.abspath(_A )}''' ) with open(_A , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser() parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the folder to store the PyTorch model or dataset/vocab.''', ) parser.add_argument( '''--tf_checkpoint_path''', default='''''', type=str, help='''An optional path to a TensorFlow checkpoint path to be converted.''', ) parser.add_argument( '''--transfo_xl_config_file''', default='''''', type=str, help=( '''An optional config json file corresponding to the pre-trained BERT model. 
\n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--transfo_xl_dataset_file''', default='''''', type=str, help='''An optional dataset file to be converted in a vocabulary.''', ) _SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
218
0
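The Transformer-XL converter in the row above persists the vocabulary as a plain dictionary (``corpus.vocab.__dict__``) with ``torch.save`` rather than pickling the object itself. A tiny round-trip sketch of that pattern; the class and path are made up for illustration:

# Sketch: persist an object's __dict__ with torch.save and rebuild the
# object without calling __init__, mirroring how the converter above
# dumps and restores vocab/corpus state.
import os
import tempfile
import torch

class TinyVocab:
    def __init__(self):
        self.idx2sym = ["<unk>", "hello"]
        self.sym2idx = {s: i for i, s in enumerate(self.idx2sym)}

vocab = TinyVocab()
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "vocab.bin")
    torch.save(vocab.__dict__, path)          # save plain state, not the object
    restored = TinyVocab.__new__(TinyVocab)   # allocate without __init__
    restored.__dict__.update(torch.load(path))
assert restored.sym2idx["hello"] == 1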
import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __A( unittest.TestCase ): """simple docstring""" @property def UpperCAmelCase_ (self ): torch.manual_seed(0 ) UpperCamelCase__ = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , ) return model def UpperCAmelCase_ (self ): UpperCamelCase__ = self.dummy_uncond_unet UpperCamelCase__ = KarrasVeScheduler() UpperCamelCase__ = KarrasVePipeline(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ ) pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = torch.manual_seed(0 ) UpperCamelCase__ = pipe(num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE_ , output_type="""numpy""" ).images UpperCamelCase__ = torch.manual_seed(0 ) UpperCamelCase__ = pipe(num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE_ , output_type="""numpy""" , return_dict=SCREAMING_SNAKE_CASE_ )[0] UpperCamelCase__ = image[0, -3:, -3:, -1] UpperCamelCase__ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCamelCase__ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class __A( unittest.TestCase ): """simple docstring""" def UpperCAmelCase_ (self ): UpperCamelCase__ = """google/ncsnpp-celebahq-256""" UpperCamelCase__ = UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = KarrasVeScheduler() UpperCamelCase__ = KarrasVePipeline(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ ) pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = torch.manual_seed(0 ) UpperCamelCase__ = pipe(num_inference_steps=20 , generator=SCREAMING_SNAKE_CASE_ , output_type="""numpy""" ).images UpperCamelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) UpperCamelCase__ = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
244
lowerCamelCase_ = frozenset( [ '''prompt''', '''height''', '''width''', '''guidance_scale''', '''negative_prompt''', '''prompt_embeds''', '''negative_prompt_embeds''', '''cross_attention_kwargs''', ] ) lowerCamelCase_ = frozenset(['''prompt''', '''negative_prompt''']) lowerCamelCase_ = frozenset([]) lowerCamelCase_ = frozenset(['''image''']) lowerCamelCase_ = frozenset( [ '''image''', '''height''', '''width''', '''guidance_scale''', ] ) lowerCamelCase_ = frozenset(['''image''']) lowerCamelCase_ = frozenset( [ '''prompt''', '''image''', '''height''', '''width''', '''guidance_scale''', '''negative_prompt''', '''prompt_embeds''', '''negative_prompt_embeds''', ] ) lowerCamelCase_ = frozenset(['''prompt''', '''image''', '''negative_prompt''']) lowerCamelCase_ = frozenset( [ # Text guided image variation with an image mask '''prompt''', '''image''', '''mask_image''', '''height''', '''width''', '''guidance_scale''', '''negative_prompt''', '''prompt_embeds''', '''negative_prompt_embeds''', ] ) lowerCamelCase_ = frozenset(['''prompt''', '''image''', '''mask_image''', '''negative_prompt''']) lowerCamelCase_ = frozenset( [ # image variation with an image mask '''image''', '''mask_image''', '''height''', '''width''', '''guidance_scale''', ] ) lowerCamelCase_ = frozenset(['''image''', '''mask_image''']) lowerCamelCase_ = frozenset( [ '''example_image''', '''image''', '''mask_image''', '''height''', '''width''', '''guidance_scale''', ] ) lowerCamelCase_ = frozenset(['''example_image''', '''image''', '''mask_image''']) lowerCamelCase_ = frozenset(['''class_labels''']) lowerCamelCase_ = frozenset(['''class_labels''']) lowerCamelCase_ = frozenset(['''batch_size''']) lowerCamelCase_ = frozenset([]) lowerCamelCase_ = frozenset(['''batch_size''']) lowerCamelCase_ = frozenset([]) lowerCamelCase_ = frozenset( [ '''prompt''', '''audio_length_in_s''', '''guidance_scale''', '''negative_prompt''', '''prompt_embeds''', '''negative_prompt_embeds''', '''cross_attention_kwargs''', ] ) lowerCamelCase_ = frozenset(['''prompt''', '''negative_prompt''']) lowerCamelCase_ = frozenset(['''input_tokens''']) lowerCamelCase_ = frozenset(['''input_tokens'''])
244
1
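Both the KarrasVe pipeline test and the frozen parameter sets in the row above lean on seeded generators so that outputs can be compared against stored reference slices. A minimal sketch of that slice-comparison idiom, with a stand-in function in place of a real diffusion pipeline:

# Sketch of the seeded slice-comparison idiom used in the test above:
# run twice with the same seed, take a corner slice, compare with a tolerance.
import numpy as np
import torch

def fake_pipeline(generator: torch.Generator) -> np.ndarray:
    # stand-in for a diffusion pipeline call; any seeded computation works
    return torch.rand(1, 32, 32, 3, generator=generator).numpy()

image_a = fake_pipeline(torch.manual_seed(0))
image_b = fake_pipeline(torch.manual_seed(0))
slice_a = image_a[0, -3:, -3:, -1]
slice_b = image_b[0, -3:, -3:, -1]
assert np.abs(slice_a - slice_b).max() < 1e-2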
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class _A ( __UpperCAmelCase ): UpperCamelCase__ : Tuple = (UniPCMultistepScheduler,) UpperCamelCase__ : Optional[Any] = (('''num_inference_steps''', 25),) def _lowerCamelCase ( self : Optional[Any] , **__SCREAMING_SNAKE_CASE : Union[str, Any]): '''simple docstring''' __a = { '''num_train_timesteps''': 1_000, '''beta_start''': 0.00_01, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''solver_order''': 2, '''solver_type''': '''bh2''', } config.update(**__SCREAMING_SNAKE_CASE) return config def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : List[Any]=0 , **__SCREAMING_SNAKE_CASE : Any): '''simple docstring''' __a = dict(self.forward_default_kwargs) __a = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE) __a = self.dummy_sample __a = 0.1 * sample __a = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: __a = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE) __a = scheduler_class(**__SCREAMING_SNAKE_CASE) scheduler.set_timesteps(__SCREAMING_SNAKE_CASE) # copy over dummy past residuals __a = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__SCREAMING_SNAKE_CASE) __a = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE) new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE) # copy over dummy past residuals __a = dummy_past_residuals[: new_scheduler.config.solver_order] __a , __a = sample, sample for t in range(__SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1): __a = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE).prev_sample __a = new_scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical" def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Optional[Any]=0 , **__SCREAMING_SNAKE_CASE : Any): '''simple docstring''' __a = dict(self.forward_default_kwargs) __a = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE) __a = self.dummy_sample __a = 0.1 * sample __a = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: __a = self.get_scheduler_config() __a = scheduler_class(**__SCREAMING_SNAKE_CASE) scheduler.set_timesteps(__SCREAMING_SNAKE_CASE) # copy over dummy past residuals (must be after setting timesteps) __a = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__SCREAMING_SNAKE_CASE) __a = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE) # copy over dummy past residuals new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE) # copy over dummy past residual (must be after setting timesteps) __a = dummy_past_residuals[: new_scheduler.config.solver_order] __a = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE).prev_sample __a = new_scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical" def _lowerCamelCase ( self : 
Any , __SCREAMING_SNAKE_CASE : Any=None , **__SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' if scheduler is None: __a = self.scheduler_classes[0] __a = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE) __a = scheduler_class(**__SCREAMING_SNAKE_CASE) __a = self.scheduler_classes[0] __a = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE) __a = scheduler_class(**__SCREAMING_SNAKE_CASE) __a = 10 __a = self.dummy_model() __a = self.dummy_sample_deter scheduler.set_timesteps(__SCREAMING_SNAKE_CASE) for i, t in enumerate(scheduler.timesteps): __a = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) __a = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE).prev_sample return sample def _lowerCamelCase ( self : int): '''simple docstring''' __a = dict(self.forward_default_kwargs) __a = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE) for scheduler_class in self.scheduler_classes: __a = self.get_scheduler_config() __a = scheduler_class(**__SCREAMING_SNAKE_CASE) __a = self.dummy_sample __a = 0.1 * sample if num_inference_steps is not None and hasattr(__SCREAMING_SNAKE_CASE , '''set_timesteps'''): scheduler.set_timesteps(__SCREAMING_SNAKE_CASE) elif num_inference_steps is not None and not hasattr(__SCREAMING_SNAKE_CASE , '''set_timesteps'''): __a = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) __a = [residual + 0.2, residual + 0.15, residual + 0.10] __a = dummy_past_residuals[: scheduler.config.solver_order] __a = scheduler.timesteps[5] __a = scheduler.timesteps[6] __a = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE).prev_sample __a = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE).prev_sample self.assertEqual(output_a.shape , sample.shape) self.assertEqual(output_a.shape , output_a.shape) def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = UniPCMultistepScheduler(**self.get_scheduler_config()) __a = self.full_loop(scheduler=__SCREAMING_SNAKE_CASE) __a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE)) assert abs(result_mean.item() - 0.24_64) < 1E-3 __a = DPMSolverSinglestepScheduler.from_config(scheduler.config) __a = DEISMultistepScheduler.from_config(scheduler.config) __a = DPMSolverMultistepScheduler.from_config(scheduler.config) __a = UniPCMultistepScheduler.from_config(scheduler.config) __a = self.full_loop(scheduler=__SCREAMING_SNAKE_CASE) __a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE)) assert abs(result_mean.item() - 0.24_64) < 1E-3 def _lowerCamelCase ( self : Any): '''simple docstring''' for timesteps in [25, 50, 100, 999, 1_000]: self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[Any]): '''simple docstring''' self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE) for order in [1, 2, 3]: for solver_type in ["bh1", "bh2"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , ) def _lowerCamelCase ( self : Dict): '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : int): '''simple docstring''' for solver_type in ["bh1", "bh2"]: 
for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , ) __a = self.full_loop( solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , ) assert not torch.isnan(__SCREAMING_SNAKE_CASE).any(), "Samples have nan numbers" def _lowerCamelCase ( self : int): '''simple docstring''' self.check_over_configs(lower_order_final=__SCREAMING_SNAKE_CASE) self.check_over_configs(lower_order_final=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]: self.check_over_forward(num_inference_steps=__SCREAMING_SNAKE_CASE , time_step=0) def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = self.full_loop() __a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE)) assert abs(result_mean.item() - 0.24_64) < 1E-3 def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' __a = self.full_loop(prediction_type='''v_prediction''') __a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE)) assert abs(result_mean.item() - 0.10_14) < 1E-3 def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config(thresholding=__SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0) __a = scheduler_class(**__SCREAMING_SNAKE_CASE) __a = 10 __a = self.dummy_model() __a = self.dummy_sample_deter.half() scheduler.set_timesteps(__SCREAMING_SNAKE_CASE) for i, t in enumerate(scheduler.timesteps): __a = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) __a = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE).prev_sample assert sample.dtype == torch.floataa def _lowerCamelCase ( self : List[Any] , **__SCREAMING_SNAKE_CASE : int): '''simple docstring''' for scheduler_class in self.scheduler_classes: __a = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE) __a = scheduler_class(**__SCREAMING_SNAKE_CASE) scheduler.set_timesteps(scheduler.config.num_train_timesteps) assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
131
import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import Callable, Dict, List, Tuple import timm import torch import torch.nn as nn from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf from huggingface_hub import cached_download, hf_hub_url from torch import Tensor from vissl.models.model_helpers import get_trunk_forward_outputs from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel from transformers.utils import logging logging.set_verbosity_info() __snake_case :Dict = logging.get_logger() @dataclass class _A : UpperCamelCase__ : nn.Module UpperCamelCase__ : List[nn.Module] = field(default_factory=__UpperCAmelCase ) UpperCamelCase__ : list = field(default_factory=__UpperCAmelCase ) def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tensor , __SCREAMING_SNAKE_CASE : Tensor): '''simple docstring''' __a = len(list(m.modules())) == 1 or isinstance(__SCREAMING_SNAKE_CASE , nn.Convad) or isinstance(__SCREAMING_SNAKE_CASE , nn.BatchNormad) if has_not_submodules: self.traced.append(__SCREAMING_SNAKE_CASE) def __call__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Tensor): '''simple docstring''' for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook)) self.module(__SCREAMING_SNAKE_CASE) [x.remove() for x in self.handles] return self @property def _lowerCamelCase ( self : List[Any]): '''simple docstring''' return list(filter(lambda __SCREAMING_SNAKE_CASE: len(list(x.state_dict().keys())) > 0 , self.traced)) @dataclass class _A : UpperCamelCase__ : nn.Module UpperCamelCase__ : nn.Module UpperCamelCase__ : int = 1 UpperCamelCase__ : List = field(default_factory=__UpperCAmelCase ) UpperCamelCase__ : List = field(default_factory=__UpperCAmelCase ) UpperCamelCase__ : bool = True def __call__( self : Any , __SCREAMING_SNAKE_CASE : Tensor): '''simple docstring''' __a = Tracker(self.dest)(__SCREAMING_SNAKE_CASE).parametrized __a = Tracker(self.src)(__SCREAMING_SNAKE_CASE).parametrized __a = list(filter(lambda __SCREAMING_SNAKE_CASE: type(__SCREAMING_SNAKE_CASE) not in self.src_skip , __SCREAMING_SNAKE_CASE)) __a = list(filter(lambda __SCREAMING_SNAKE_CASE: type(__SCREAMING_SNAKE_CASE) not in self.dest_skip , __SCREAMING_SNAKE_CASE)) if len(__SCREAMING_SNAKE_CASE) != len(__SCREAMING_SNAKE_CASE) and self.raise_if_mismatch: raise Exception( F'Numbers of operations are different. 
Source module has {len(__SCREAMING_SNAKE_CASE)} operations while' F' destination module has {len(__SCREAMING_SNAKE_CASE)}.') for dest_m, src_m in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): dest_m.load_state_dict(src_m.state_dict()) if self.verbose == 1: print(F'Transfered from={src_m} to={dest_m}') class _A ( nn.Module ): def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : nn.Module): '''simple docstring''' super().__init__() __a = [] # - get the stem feature_blocks.append(('''conv1''', model.stem)) # - get all the feature blocks for k, v in model.trunk_output.named_children(): assert k.startswith('''block'''), F'Unexpected layer name {k}' __a = len(__SCREAMING_SNAKE_CASE) + 1 feature_blocks.append((F'res{block_index}', v)) __a = nn.ModuleDict(__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Tensor): '''simple docstring''' return get_trunk_forward_outputs( __SCREAMING_SNAKE_CASE , out_feat_keys=__SCREAMING_SNAKE_CASE , feature_blocks=self._feature_blocks , ) class _A ( __UpperCAmelCase ): def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : str): '''simple docstring''' __a = x.split('''-''') return x_split[0] + x_split[1] + "_" + "".join(x_split[2:]) def __getitem__( self : List[Any] , __SCREAMING_SNAKE_CASE : str): '''simple docstring''' if x not in self: __a = self.convert_name_to_timm(__SCREAMING_SNAKE_CASE) __a = partial(lambda: (timm.create_model(__SCREAMING_SNAKE_CASE , pretrained=__SCREAMING_SNAKE_CASE).eval(), None)) else: __a = super().__getitem__(__SCREAMING_SNAKE_CASE) return val class _A ( __UpperCAmelCase ): def __getitem__( self : Tuple , __SCREAMING_SNAKE_CASE : str): '''simple docstring''' if "seer" in x and "in1k" not in x: __a = RegNetModel else: __a = RegNetForImageClassification return val def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): for from_key, to_key in keys: __a = from_state_dict[from_key].clone() print(f'Copied key={from_key} to={to_key}' ) return to_state_dict def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True , ): print(f'Converting {name}...' ) with torch.no_grad(): __a , __a = from_model_func() __a = our_model_func(_UpperCAmelCase ).eval() __a = ModuleTransfer(src=_UpperCAmelCase , dest=_UpperCAmelCase , raise_if_mismatch=_UpperCAmelCase ) __a = torch.randn((1, 3, 224, 224) ) module_transfer(_UpperCAmelCase ) if from_state_dict is not None: __a = [] # for seer - in1k finetuned we have to manually copy the head if "seer" in name and "in1k" in name: __a = [('''0.clf.0.weight''', '''classifier.1.weight'''), ('''0.clf.0.bias''', '''classifier.1.bias''')] __a = manually_copy_vissl_head(_UpperCAmelCase , our_model.state_dict() , _UpperCAmelCase ) our_model.load_state_dict(_UpperCAmelCase ) __a = our_model(_UpperCAmelCase , output_hidden_states=_UpperCAmelCase ) __a = ( our_outputs.logits if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else our_outputs.last_hidden_state ) __a = from_model(_UpperCAmelCase ) __a = from_output[-1] if type(_UpperCAmelCase ) is list else from_output # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state if "seer" in name and "in1k" in name: __a = our_outputs.hidden_states[-1] assert torch.allclose(_UpperCAmelCase , _UpperCAmelCase ), "The model logits don't match the original one." 
if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / name , commit_message='''Add model''' , use_temp_dir=_UpperCAmelCase , ) __a = 224 if '''seer''' not in name else 384 # we can use the convnext one __a = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' , size=_UpperCAmelCase ) image_processor.push_to_hub( repo_path_or_name=save_directory / name , commit_message='''Add image processor''' , use_temp_dir=_UpperCAmelCase , ) print(f'Pushed {name}' ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = True ): __a = '''imagenet-1k-id2label.json''' __a = 1000 __a = (1, num_labels) __a = '''huggingface/label-files''' __a = num_labels __a = json.load(open(cached_download(hf_hub_url(_UpperCAmelCase , _UpperCAmelCase , repo_type='''dataset''' ) ) , '''r''' ) ) __a = {int(_UpperCAmelCase ): v for k, v in idalabel.items()} __a = idalabel __a = {v: k for k, v in idalabel.items()} __a = partial(_UpperCAmelCase , num_labels=_UpperCAmelCase , idalabel=_UpperCAmelCase , labelaid=_UpperCAmelCase ) __a = { '''regnet-x-002''': ImageNetPreTrainedConfig( depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type='''x''' ), '''regnet-x-004''': ImageNetPreTrainedConfig( depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type='''x''' ), '''regnet-x-006''': ImageNetPreTrainedConfig( depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type='''x''' ), '''regnet-x-008''': ImageNetPreTrainedConfig( depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type='''x''' ), '''regnet-x-016''': ImageNetPreTrainedConfig( depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type='''x''' ), '''regnet-x-032''': ImageNetPreTrainedConfig( depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type='''x''' ), '''regnet-x-040''': ImageNetPreTrainedConfig( depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type='''x''' ), '''regnet-x-064''': ImageNetPreTrainedConfig( depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type='''x''' ), '''regnet-x-080''': ImageNetPreTrainedConfig( depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type='''x''' ), '''regnet-x-120''': ImageNetPreTrainedConfig( depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type='''x''' ), '''regnet-x-160''': ImageNetPreTrainedConfig( depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type='''x''' ), '''regnet-x-320''': ImageNetPreTrainedConfig( depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type='''x''' ), # y variant '''regnet-y-002''': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ), '''regnet-y-004''': ImageNetPreTrainedConfig( depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ), '''regnet-y-006''': ImageNetPreTrainedConfig( depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ), '''regnet-y-008''': ImageNetPreTrainedConfig( depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ), '''regnet-y-016''': ImageNetPreTrainedConfig( depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ), '''regnet-y-032''': ImageNetPreTrainedConfig( depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , 
groups_width=24 ), '''regnet-y-040''': ImageNetPreTrainedConfig( depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ), '''regnet-y-064''': ImageNetPreTrainedConfig( depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ), '''regnet-y-080''': ImageNetPreTrainedConfig( depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ), '''regnet-y-120''': ImageNetPreTrainedConfig( depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ), '''regnet-y-160''': ImageNetPreTrainedConfig( depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ), '''regnet-y-320''': ImageNetPreTrainedConfig( depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ), # models created by SEER -> https://arxiv.org/abs/2202.08360 '''regnet-y-320-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ), '''regnet-y-640-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ), '''regnet-y-1280-seer''': RegNetConfig( depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ), '''regnet-y-2560-seer''': RegNetConfig( depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ), '''regnet-y-10b-seer''': ImageNetPreTrainedConfig( depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ), # finetuned on imagenet '''regnet-y-320-seer-in1k''': ImageNetPreTrainedConfig( depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ), '''regnet-y-640-seer-in1k''': ImageNetPreTrainedConfig( depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ), '''regnet-y-1280-seer-in1k''': ImageNetPreTrainedConfig( depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ), '''regnet-y-2560-seer-in1k''': ImageNetPreTrainedConfig( depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ), '''regnet-y-10b-seer-in1k''': ImageNetPreTrainedConfig( depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ), } __a = NameToOurModelFuncMap() __a = NameToFromModelFuncMap() # add seer weights logic def load_using_classy_vision(_UpperCAmelCase , _UpperCAmelCase ) -> Tuple[nn.Module, Dict]: __a = torch.hub.load_state_dict_from_url(_UpperCAmelCase , model_dir=str(_UpperCAmelCase ) , map_location='''cpu''' ) __a = model_func() # check if we have a head, if yes add it __a = files['''classy_state_dict''']['''base_model''']['''model'''] __a = model_state_dict['''trunk'''] model.load_state_dict(_UpperCAmelCase ) return model.eval(), model_state_dict["heads"] # pretrained __a = partial( _UpperCAmelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) __a = partial( _UpperCAmelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) __a = partial( _UpperCAmelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , ) __a = partial( _UpperCAmelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch''' , lambda: 
FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=27 , group_width=1010 , w_a=1744 , w_a=6_20.83 , w_m=2.52 ) ) ) , ) # IN1K finetuned __a = partial( _UpperCAmelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) __a = partial( _UpperCAmelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) __a = partial( _UpperCAmelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , ) __a = partial( _UpperCAmelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch''' , lambda: FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=27 , group_width=1010 , w_a=1744 , w_a=6_20.83 , w_m=2.52 ) ) ) , ) if model_name: convert_weight_and_push( _UpperCAmelCase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , _UpperCAmelCase , _UpperCAmelCase , ) else: for model_name, config in names_to_config.items(): convert_weight_and_push( _UpperCAmelCase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) return config, expected_shape if __name__ == "__main__": __snake_case :Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default=None, type=str, help=( '''The name of the model you wish to convert, it must be one of the supported regnet* architecture,''' ''' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.''' ), ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=Path, required=True, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', default=True, type=bool, required=False, help='''If True, push model and image processor to the hub.''', ) __snake_case :Tuple = parser.parse_args() __snake_case :Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
131
1
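The scheduler tests in the row above repeatedly check that a scheduler survives a ``save_config`` / ``from_pretrained`` round trip. A compact sketch of that check with a single scheduler (the same UniPCMultistepScheduler the tests exercise); the config values are illustrative:

# Sketch: save a scheduler config to disk, reload it, and confirm the
# round trip preserves the configuration, as the tests above do.
import tempfile
from diffusers import UniPCMultistepScheduler

scheduler = UniPCMultistepScheduler(num_train_timesteps=1000, solver_order=2)
with tempfile.TemporaryDirectory() as tmp:
    scheduler.save_config(tmp)
    reloaded = UniPCMultistepScheduler.from_pretrained(tmp)
assert reloaded.config.solver_order == scheduler.config.solver_order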
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import TensorType, logging if TYPE_CHECKING: from ...onnx.config import PatchingSpec from ...tokenization_utils_base import PreTrainedTokenizerBase a = logging.get_logger(__name__) a = { """allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json""", """allenai/longformer-large-4096""": """https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json""", """allenai/longformer-large-4096-finetuned-triviaqa""": ( """https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json""" ), """allenai/longformer-base-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json""" ), """allenai/longformer-large-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json""" ), } class SCREAMING_SNAKE_CASE__ ( lowercase_ ): _a = 'longformer' def __init__( self : Union[str, Any] , lowerCAmelCase : Union[List[int], int] = 512 , lowerCAmelCase : int = 2 , lowerCAmelCase : int = 1 , lowerCAmelCase : int = 0 , lowerCAmelCase : int = 2 , lowerCAmelCase : int = 3_0522 , lowerCAmelCase : int = 768 , lowerCAmelCase : int = 12 , lowerCAmelCase : int = 12 , lowerCAmelCase : int = 3072 , lowerCAmelCase : str = "gelu" , lowerCAmelCase : float = 0.1 , lowerCAmelCase : float = 0.1 , lowerCAmelCase : int = 512 , lowerCAmelCase : int = 2 , lowerCAmelCase : float = 0.02 , lowerCAmelCase : float = 1e-12 , lowerCAmelCase : bool = False , **lowerCAmelCase : List[str] , ): super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) lowerCAmelCase = attention_window lowerCAmelCase = sep_token_id lowerCAmelCase = bos_token_id lowerCAmelCase = eos_token_id lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = hidden_act lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = initializer_range lowerCAmelCase = layer_norm_eps lowerCAmelCase = onnx_export class SCREAMING_SNAKE_CASE__ ( lowercase_ ): def __init__( self : List[Any] , lowerCAmelCase : "PretrainedConfig" , lowerCAmelCase : str = "default" , lowerCAmelCase : "List[PatchingSpec]" = None ): super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCAmelCase = True @property def __lowercase ( self : Dict ): if self.task == "multiple-choice": lowerCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'} else: lowerCAmelCase = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""global_attention_mask""", dynamic_axis), ] ) @property def __lowercase ( self : int ): lowerCAmelCase = super().outputs if self.task == "default": lowerCAmelCase = {0: 'batch'} return outputs @property def __lowercase ( self : Dict ): return 1e-4 @property def __lowercase ( self : Union[str, Any] ): # needs to be >= 14 to support tril operator return max(super().default_onnx_opset , 14 ) def __lowercase ( self : Tuple , lowerCAmelCase : "PreTrainedTokenizerBase" , 
lowerCAmelCase : int = -1 , lowerCAmelCase : int = -1 , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[TensorType] = None , ): lowerCAmelCase = super().generate_dummy_inputs( preprocessor=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , seq_length=SCREAMING_SNAKE_CASE_ , is_pair=SCREAMING_SNAKE_CASE_ , framework=SCREAMING_SNAKE_CASE_ ) import torch # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64) # makes the export fail randomly lowerCAmelCase = torch.zeros_like(inputs["""input_ids"""] ) # make every second token global lowerCAmelCase = 1 return inputs
155
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class UpperCamelCase__ ( lowercase_ ): """simple docstring""" _SCREAMING_SNAKE_CASE = ( """This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.""" """It takes two arguments named `image` which should be the original image, and `label` which should be a text """ """describing the elements what should be identified in the segmentation mask. The tool returns the mask.""" ) _SCREAMING_SNAKE_CASE = """CIDAS/clipseg-rd64-refined""" _SCREAMING_SNAKE_CASE = """image_segmenter""" _SCREAMING_SNAKE_CASE = CLIPSegForImageSegmentation _SCREAMING_SNAKE_CASE = ["""image""", """text"""] _SCREAMING_SNAKE_CASE = ["""image"""] def __init__( self : Dict , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Optional[int] ): requires_backends(self , ['vision'] ) super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : "Image" , SCREAMING_SNAKE_CASE_ : str ): return self.pre_processor(text=[label] , images=[image] , padding=SCREAMING_SNAKE_CASE_ , return_tensors='pt' ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , SCREAMING_SNAKE_CASE_ : int ): with torch.no_grad(): lowerCAmelCase_ : List[str] = self.model(**SCREAMING_SNAKE_CASE_ ).logits return logits def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] ): lowerCAmelCase_ : Dict = outputs.cpu().detach().numpy() lowerCAmelCase_ : Optional[Any] = 0 lowerCAmelCase_ : Optional[Any] = 1 return Image.fromarray((array * 2_5_5).astype(np.uinta ) )
224
0
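The Longformer ONNX config in the row above builds a ``global_attention_mask`` of zeros and then marks every second token as global. A small sketch of constructing such a mask with plain tensors; the vocabulary size and batch shape are dummies:

# Sketch: build a Longformer-style global attention mask -- zeros everywhere,
# with selected positions (here: every second token, as in the ONNX config
# above) set to 1.
import torch

input_ids = torch.randint(0, 30522, (2, 16))        # dummy batch of token ids
global_attention_mask = torch.zeros_like(input_ids)
global_attention_mask[:, ::2] = 1                   # every second token global
assert global_attention_mask.sum() == 2 * 8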
"""simple docstring""" import os import sys import unittest a__ : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path a__ : Any = os.path.join(git_repo_path, '''src''', '''diffusers''') class UpperCamelCase_ ( unittest.TestCase): """simple docstring""" def UpperCAmelCase_ ( self : int ) -> Optional[int]: __SCREAMING_SNAKE_CASE = find_backend(" if not is_torch_available():" ) self.assertEqual(UpperCAmelCase__ , "torch" ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") __SCREAMING_SNAKE_CASE = find_backend(" if not (is_torch_available() and is_transformers_available()):" ) self.assertEqual(UpperCAmelCase__ , "torch_and_transformers" ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") __SCREAMING_SNAKE_CASE = find_backend( " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" ) self.assertEqual(UpperCAmelCase__ , "torch_and_transformers_and_onnx" ) def UpperCAmelCase_ ( self : int ) -> int: __SCREAMING_SNAKE_CASE = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("torch" , UpperCAmelCase__ ) self.assertIn("torch_and_transformers" , UpperCAmelCase__ ) self.assertIn("flax_and_transformers" , UpperCAmelCase__ ) self.assertIn("torch_and_transformers_and_onnx" , UpperCAmelCase__ ) # Likewise, we can't assert on the exact content of a key self.assertIn("UNet2DModel" , objects["torch"] ) self.assertIn("FlaxUNet2DConditionModel" , objects["flax"] ) self.assertIn("StableDiffusionPipeline" , objects["torch_and_transformers"] ) self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"] ) self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"] ) self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"] ) def UpperCAmelCase_ ( self : List[Any] ) -> int: __SCREAMING_SNAKE_CASE = create_dummy_object("CONSTANT" , "'torch'" ) self.assertEqual(UpperCAmelCase__ , "\nCONSTANT = None\n" ) __SCREAMING_SNAKE_CASE = create_dummy_object("function" , "'torch'" ) self.assertEqual( UpperCAmelCase__ , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" ) __SCREAMING_SNAKE_CASE = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n" __SCREAMING_SNAKE_CASE = create_dummy_object("FakeClass" , "'torch'" ) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def UpperCAmelCase_ ( self : List[Any] ) -> int: __SCREAMING_SNAKE_CASE = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n 
_backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n" __SCREAMING_SNAKE_CASE = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} ) self.assertEqual(dummy_files["torch"] , UpperCAmelCase__ )
195
"""simple docstring""" from __future__ import annotations import math import numpy as np from numpy.linalg import norm def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(lowerCAmelCase_ , lowerCAmelCase_ ) ) ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' if dataset.ndim != value_array.ndim: __SCREAMING_SNAKE_CASE = ( "Wrong input data's dimensions... " f"""dataset : {dataset.ndim}, value_array : {value_array.ndim}""" ) raise ValueError(lowerCAmelCase_ ) try: if dataset.shape[1] != value_array.shape[1]: __SCREAMING_SNAKE_CASE = ( "Wrong input data's shape... " f"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}""" ) raise ValueError(lowerCAmelCase_ ) except IndexError: if dataset.ndim != value_array.ndim: raise TypeError("Wrong shape" ) if dataset.dtype != value_array.dtype: __SCREAMING_SNAKE_CASE = ( "Input data have different datatype... " f"""dataset : {dataset.dtype}, value_array : {value_array.dtype}""" ) raise TypeError(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = [] for value in value_array: __SCREAMING_SNAKE_CASE = euclidean(lowerCAmelCase_ , dataset[0] ) __SCREAMING_SNAKE_CASE = dataset[0].tolist() for dataset_value in dataset[1:]: __SCREAMING_SNAKE_CASE = euclidean(lowerCAmelCase_ , lowerCAmelCase_ ) if dist > temp_dist: __SCREAMING_SNAKE_CASE = temp_dist __SCREAMING_SNAKE_CASE = dataset_value.tolist() answer.append([vector, dist] ) return answer def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' return np.dot(lowerCAmelCase_ , lowerCAmelCase_ ) / (norm(lowerCAmelCase_ ) * norm(lowerCAmelCase_ )) if __name__ == "__main__": import doctest doctest.testmod()
195
1
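The similarity-search snippet in the row above finds, for each query vector, the dataset row with the smallest Euclidean distance. A short usage sketch of the same idea with plain NumPy, using made-up data:

# Sketch: nearest-neighbour lookup by Euclidean distance, matching the
# behaviour of the similarity-search function above.
import numpy as np

dataset = np.array([[0.0, 0.0], [1.0, 1.0], [4.0, 4.0]])
queries = np.array([[0.9, 1.2]])
for q in queries:
    dists = np.linalg.norm(dataset - q, axis=1)
    best = int(dists.argmin())
    print(dataset[best].tolist(), float(dists[best]))  # -> [1.0, 1.0], ~0.36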
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __snake_case :List[Any] = logging.get_logger(__name__) __snake_case :Dict = { '''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''', '''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''', '''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''', '''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''', # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class _A ( __UpperCAmelCase ): UpperCamelCase__ : Tuple = '''mobilenet_v2''' def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Tuple=3 , __SCREAMING_SNAKE_CASE : int=224 , __SCREAMING_SNAKE_CASE : Optional[int]=1.0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=8 , __SCREAMING_SNAKE_CASE : List[Any]=8 , __SCREAMING_SNAKE_CASE : int=6 , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Dict="relu6" , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Dict=0.8 , __SCREAMING_SNAKE_CASE : Dict=0.02 , __SCREAMING_SNAKE_CASE : Dict=0.0_01 , __SCREAMING_SNAKE_CASE : Dict=255 , **__SCREAMING_SNAKE_CASE : Optional[int] , ): '''simple docstring''' super().__init__(**__SCREAMING_SNAKE_CASE) if depth_multiplier <= 0: raise ValueError('''depth_multiplier must be greater than zero.''') __a = num_channels __a = image_size __a = depth_multiplier __a = depth_divisible_by __a = min_depth __a = expand_ratio __a = output_stride __a = first_layer_is_expansion __a = finegrained_output __a = hidden_act __a = tf_padding __a = classifier_dropout_prob __a = initializer_range __a = layer_norm_eps __a = semantic_loss_ignore_index class _A ( __UpperCAmelCase ): UpperCamelCase__ : Any = version.parse('''1.11''' ) @property def _lowerCamelCase ( self : Dict): '''simple docstring''' return OrderedDict([('''pixel_values''', {0: '''batch'''})]) @property def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' if self.task == "image-classification": return OrderedDict([('''logits''', {0: '''batch'''})]) else: return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})]) @property def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' return 1E-4
49
from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING __snake_case :List[Any] = logging.get_logger(__name__) @add_end_docstrings(__UpperCAmelCase ) class _A ( __UpperCAmelCase ): def __init__( self : Dict , **__SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' super().__init__(**__SCREAMING_SNAKE_CASE) requires_backends(self , '''vision''') requires_backends(self , '''torch''') if self.framework != "pt": raise ValueError(F'The {self.__class__} is only available in PyTorch.') self.check_model_type(__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Any , **__SCREAMING_SNAKE_CASE : Any): '''simple docstring''' __a = {} __a = {} __a = {} # preprocess args if "points_per_batch" in kwargs: __a = kwargs['''points_per_batch'''] if "points_per_crop" in kwargs: __a = kwargs['''points_per_crop'''] if "crops_n_layers" in kwargs: __a = kwargs['''crops_n_layers'''] if "crop_overlap_ratio" in kwargs: __a = kwargs['''crop_overlap_ratio'''] if "crop_n_points_downscale_factor" in kwargs: __a = kwargs['''crop_n_points_downscale_factor'''] # postprocess args if "pred_iou_thresh" in kwargs: __a = kwargs['''pred_iou_thresh'''] if "stability_score_offset" in kwargs: __a = kwargs['''stability_score_offset'''] if "mask_threshold" in kwargs: __a = kwargs['''mask_threshold'''] if "stability_score_thresh" in kwargs: __a = kwargs['''stability_score_thresh'''] if "crops_nms_thresh" in kwargs: __a = kwargs['''crops_nms_thresh'''] if "output_rle_mask" in kwargs: __a = kwargs['''output_rle_mask'''] if "output_bboxes_mask" in kwargs: __a = kwargs['''output_bboxes_mask'''] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , *__SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : Any=None , **__SCREAMING_SNAKE_CASE : str): '''simple docstring''' return super().__call__(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , num_workers=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any=64 , __SCREAMING_SNAKE_CASE : int = 0 , __SCREAMING_SNAKE_CASE : float = 512 / 1_500 , __SCREAMING_SNAKE_CASE : Optional[int] = 32 , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , ): '''simple docstring''' __a = load_image(__SCREAMING_SNAKE_CASE) __a = self.image_processor.size['''longest_edge'''] __a , __a , __a , __a = self.image_processor.generate_crop_boxes( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) __a = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''') with self.device_placement(): if self.framework == "pt": __a = self.get_inference_context() with inference_context(): __a = self._ensure_tensor_on_device(__SCREAMING_SNAKE_CASE , device=self.device) __a = self.model.get_image_embeddings(model_inputs.pop('''pixel_values''')) __a = image_embeddings __a = grid_points.shape[1] __a = points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( '''Cannot have points_per_batch<=0. 
Must be >=1 to returned batched outputs. ''' '''To return all points at once, set points_per_batch to None''') for i in range(0 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): __a = grid_points[:, i : i + points_per_batch, :, :] __a = input_labels[:, i : i + points_per_batch] __a = i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int=0.88 , __SCREAMING_SNAKE_CASE : List[Any]=0.95 , __SCREAMING_SNAKE_CASE : List[Any]=0 , __SCREAMING_SNAKE_CASE : int=1 , ): '''simple docstring''' __a = model_inputs.pop('''input_boxes''') __a = model_inputs.pop('''is_last''') __a = model_inputs.pop('''original_sizes''').tolist() __a = model_inputs.pop('''reshaped_input_sizes''').tolist() __a = self.model(**__SCREAMING_SNAKE_CASE) # post processing happens here in order to avoid CPU GPU copies of ALL the masks __a = model_outputs['''pred_masks'''] __a = self.image_processor.post_process_masks( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , binarize=__SCREAMING_SNAKE_CASE) __a = model_outputs['''iou_scores'''] __a , __a , __a = self.image_processor.filter_masks( masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict=False , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : int=0.7 , ): '''simple docstring''' __a = [] __a = [] __a = [] for model_output in model_outputs: all_scores.append(model_output.pop('''iou_scores''')) all_masks.extend(model_output.pop('''masks''')) all_boxes.append(model_output.pop('''boxes''')) __a = torch.cat(__SCREAMING_SNAKE_CASE) __a = torch.cat(__SCREAMING_SNAKE_CASE) __a , __a , __a , __a = self.image_processor.post_process_for_mask_generation( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) __a = defaultdict(__SCREAMING_SNAKE_CASE) for output in model_outputs: for k, v in output.items(): extra[k].append(__SCREAMING_SNAKE_CASE) __a = {} if output_rle_mask: __a = rle_mask if output_bboxes_mask: __a = bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
49
1
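The mask-generation pipeline in the row above yields the prompt grid in chunks of ``points_per_batch`` and flags the final chunk. A stripped-down sketch of that chunked-generator pattern; note the ``is_last`` test here uses ``>=``, a slight generalization of the exact-equality check in the pipeline:

# Sketch of the chunked iteration used by the pipeline above: slice a grid
# of point prompts into batches and flag the final batch with is_last.
import torch

def iter_point_batches(grid_points: torch.Tensor, points_per_batch: int):
    n_points = grid_points.shape[1]
    for i in range(0, n_points, points_per_batch):
        batch = grid_points[:, i : i + points_per_batch]
        is_last = i + points_per_batch >= n_points
        yield batch, is_last

grid = torch.zeros(1, 10, 2)  # 10 dummy (x, y) point prompts
flags = [last for _, last in iter_point_batches(grid, 4)]
assert flags == [False, False, True]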
import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging __UpperCAmelCase :List[str] = logging.get_logger(__name__) class a ( _a ): """simple docstring""" SCREAMING_SNAKE_CASE : str = ["input_features", "is_longer"] def __init__( self : Optional[Any] , snake_case : int=64 , snake_case : List[str]=4_8000 , snake_case : int=480 , snake_case : Optional[int]=10 , snake_case : List[str]=1024 , snake_case : List[str]=0.0 , snake_case : Union[str, Any]=False , snake_case : float = 0 , snake_case : float = 1_4000 , snake_case : int = None , snake_case : str = "fusion" , snake_case : str = "repeatpad" , **snake_case : Dict , ) -> Union[str, Any]: super().__init__( feature_size=snake_case , sampling_rate=snake_case , padding_value=snake_case , return_attention_mask=snake_case , **snake_case , ) __UpperCAmelCase : str = top_db __UpperCAmelCase : List[str] = truncation __UpperCAmelCase : Union[str, Any] = padding __UpperCAmelCase : str = fft_window_size __UpperCAmelCase : List[str] = (fft_window_size >> 1) + 1 __UpperCAmelCase : Any = hop_length __UpperCAmelCase : Optional[Any] = max_length_s __UpperCAmelCase : str = max_length_s * sampling_rate __UpperCAmelCase : Any = sampling_rate __UpperCAmelCase : Optional[int] = frequency_min __UpperCAmelCase : Dict = frequency_max __UpperCAmelCase : Optional[int] = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case , min_frequency=snake_case , max_frequency=snake_case , sampling_rate=snake_case , norm=snake_case , mel_scale='''htk''' , ) __UpperCAmelCase : List[Any] = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case , min_frequency=snake_case , max_frequency=snake_case , sampling_rate=snake_case , norm='''slaney''' , mel_scale='''slaney''' , ) def lowerCamelCase__ ( self : str ) -> Dict[str, Any]: __UpperCAmelCase : int = copy.deepcopy(self.__dict__ ) __UpperCAmelCase : Optional[int] = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def lowerCamelCase__ ( self : str , snake_case : np.array , snake_case : Optional[np.array] = None ) -> np.ndarray: __UpperCAmelCase : Dict = spectrogram( snake_case , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=snake_case , log_mel='''dB''' , ) return log_mel_spectrogram.T def lowerCamelCase__ ( self : Optional[int] , snake_case : List[str] , snake_case : List[Any] , snake_case : Tuple ) -> List[str]: __UpperCAmelCase : int = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk __UpperCAmelCase : List[Any] = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk __UpperCAmelCase : str = [0] # randomly choose index for each part __UpperCAmelCase : Any = np.random.choice(ranges[0] ) __UpperCAmelCase : List[str] = np.random.choice(ranges[1] ) __UpperCAmelCase : Dict = np.random.choice(ranges[2] ) __UpperCAmelCase : Dict = mel[idx_front : idx_front + chunk_frames, :] __UpperCAmelCase : Optional[int] = mel[idx_middle : idx_middle + chunk_frames, :] 
__UpperCAmelCase : Any = mel[idx_back : idx_back + chunk_frames, :] __UpperCAmelCase : Any = torch.tensor(mel[None, None, :] ) __UpperCAmelCase : List[str] = torch.nn.functional.interpolate( snake_case , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=snake_case ) __UpperCAmelCase : List[Any] = mel_shrink[0][0].numpy() __UpperCAmelCase : int = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 ) return mel_fusion def lowerCamelCase__ ( self : Dict , snake_case : np.array , snake_case : Optional[Any] , snake_case : List[Any] , snake_case : Optional[Any] ) -> np.array: if waveform.shape[0] > max_length: if truncation == "rand_trunc": __UpperCAmelCase : Optional[Any] = True # random crop to max_length (for compatibility) -> this should be handled by self.pad __UpperCAmelCase : Dict = len(snake_case ) - max_length __UpperCAmelCase : Optional[int] = np.random.randint(0 , overflow + 1 ) __UpperCAmelCase : Dict = waveform[idx : idx + max_length] __UpperCAmelCase : List[str] = self._np_extract_fbank_features(snake_case , self.mel_filters_slaney )[None, :] elif truncation == "fusion": __UpperCAmelCase : Dict = self._np_extract_fbank_features(snake_case , self.mel_filters ) __UpperCAmelCase : Tuple = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed __UpperCAmelCase : int = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. __UpperCAmelCase : str = np.stack([mel, mel, mel, mel] , axis=0 ) __UpperCAmelCase : Union[str, Any] = False else: __UpperCAmelCase : List[Any] = self._random_mel_fusion(snake_case , snake_case , snake_case ) __UpperCAmelCase : Optional[Any] = True else: raise NotImplementedError(f'data_truncating {truncation} not implemented' ) else: __UpperCAmelCase : Dict = False # only use repeat as a new possible value for padding. 
# you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length: if padding == "repeat": __UpperCAmelCase : Tuple = int(max_length / len(snake_case ) ) __UpperCAmelCase : Tuple = np.stack(np.tile(snake_case , n_repeat + 1 ) )[:max_length] if padding == "repeatpad": __UpperCAmelCase : Union[str, Any] = int(max_length / len(snake_case ) ) __UpperCAmelCase : Tuple = np.stack(np.tile(snake_case , snake_case ) ) __UpperCAmelCase : List[Any] = np.pad(snake_case , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 ) if truncation == "fusion": __UpperCAmelCase : Optional[int] = self._np_extract_fbank_features(snake_case , self.mel_filters ) __UpperCAmelCase : Optional[int] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 ) else: __UpperCAmelCase : List[str] = self._np_extract_fbank_features(snake_case , self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self : str , snake_case : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , snake_case : str = None , snake_case : Optional[str] = None , snake_case : Optional[int] = None , snake_case : Optional[int] = None , snake_case : Optional[Union[str, TensorType]] = None , **snake_case : Dict , ) -> BatchFeature: __UpperCAmelCase : List[Any] = truncation if truncation is not None else self.truncation __UpperCAmelCase : Union[str, Any] = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a' f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input' f' was sampled with {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) __UpperCAmelCase : Optional[int] = isinstance(snake_case , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}' ) __UpperCAmelCase : int = is_batched_numpy or ( isinstance(snake_case , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __UpperCAmelCase : int = [np.asarray(snake_case , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(snake_case , np.ndarray ): __UpperCAmelCase : Union[str, Any] = np.asarray(snake_case , dtype=np.floataa ) elif isinstance(snake_case , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __UpperCAmelCase : Optional[Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __UpperCAmelCase : Any = [np.asarray(snake_case )] # convert to mel spectrogram, truncate and pad if needed.
__UpperCAmelCase : Optional[Any] = [ self._get_input_mel(snake_case , max_length if max_length else self.nb_max_samples , snake_case , snake_case ) for waveform in raw_speech ] __UpperCAmelCase : Any = [] __UpperCAmelCase : Any = [] for mel, longer in padded_inputs: input_mel.append(snake_case ) is_longer.append(snake_case ) if truncation == "fusion" and sum(snake_case ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer __UpperCAmelCase : int = np.random.randint(0 , len(snake_case ) ) __UpperCAmelCase : List[str] = True if isinstance(input_mel[0] , snake_case ): __UpperCAmelCase : str = [np.asarray(snake_case , dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool __UpperCAmelCase : Optional[int] = [[longer] for longer in is_longer] __UpperCAmelCase : List[str] = {'''input_features''': input_mel, '''is_longer''': is_longer} __UpperCAmelCase : Optional[Any] = BatchFeature(snake_case ) if return_tensors is not None: __UpperCAmelCase : List[Any] = input_features.convert_to_tensors(snake_case ) return input_features
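# Usage sketch (not part of the original file): the extractor above mirrors
# transformers' ClapFeatureExtractor, turning raw mono audio into stacked
# log-mel `input_features` plus an `is_longer` flag. Shapes below assume the
# default 10 s max length with "fusion" truncation and "repeatpad" padding.
import numpy as np
from transformers import ClapFeatureExtractor

extractor = ClapFeatureExtractor()  # defaults: 64 mel bins, 48 kHz sampling rate
waveform = np.random.randn(48_000).astype(np.float32)  # one second of synthetic mono audio

features = extractor(waveform, sampling_rate=48_000, return_tensors="np")
print(features["input_features"].shape)  # e.g. (1, 4, 1001, 64): four stacked mel views
print(features["is_longer"])  # [[False]] -- the clip is shorter than the max length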
352
'''simple docstring''' import os import sys import unittest __UpperCAmelCase :Optional[int] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __UpperCAmelCase :Dict = os.path.join(git_repo_path, "src", "diffusers") class a ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self : Any ) -> int: __UpperCAmelCase : Optional[Any] = find_backend(''' if not is_torch_available():''' ) self.assertEqual(snake_case , '''torch''' ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") __UpperCAmelCase : Union[str, Any] = find_backend(''' if not (is_torch_available() and is_transformers_available()):''' ) self.assertEqual(snake_case , '''torch_and_transformers''' ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") __UpperCAmelCase : List[str] = find_backend( ''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' ) self.assertEqual(snake_case , '''torch_and_transformers_and_onnx''' ) def lowerCamelCase__ ( self : Optional[int] ) -> int: __UpperCAmelCase : Tuple = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('''torch''' , snake_case ) self.assertIn('''torch_and_transformers''' , snake_case ) self.assertIn('''flax_and_transformers''' , snake_case ) self.assertIn('''torch_and_transformers_and_onnx''' , snake_case ) # Likewise, we can't assert on the exact content of a key self.assertIn('''UNet2DModel''' , objects['''torch'''] ) self.assertIn('''FlaxUNet2DConditionModel''' , objects['''flax'''] ) self.assertIn('''StableDiffusionPipeline''' , objects['''torch_and_transformers'''] ) self.assertIn('''FlaxStableDiffusionPipeline''' , objects['''flax_and_transformers'''] ) self.assertIn('''LMSDiscreteScheduler''' , objects['''torch_and_scipy'''] ) self.assertIn('''OnnxStableDiffusionPipeline''' , objects['''torch_and_transformers_and_onnx'''] ) def lowerCamelCase__ ( self : Optional[int] ) -> List[Any]: __UpperCAmelCase : str = create_dummy_object('''CONSTANT''' , '''\'torch\'''' ) self.assertEqual(snake_case , '''\nCONSTANT = None\n''' ) __UpperCAmelCase : Union[str, Any] = create_dummy_object('''function''' , '''\'torch\'''' ) self.assertEqual( snake_case , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' ) __UpperCAmelCase : Optional[int] = ''' class FakeClass(metaclass=DummyObject): _backends = \'torch\' def __init__(self, *args, **kwargs): requires_backends(self, \'torch\') @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, \'torch\') @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, \'torch\') ''' __UpperCAmelCase : Optional[Any] = create_dummy_object('''FakeClass''' , '''\'torch\'''' ) self.assertEqual(snake_case , snake_case ) def lowerCamelCase__ ( self : int ) -> List[Any]: __UpperCAmelCase : List[str] = '''# This file is autogenerated by the command `make fix-copies`, do not edit. 
from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, ["torch"]) class FakeClass(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) ''' __UpperCAmelCase : Optional[int] = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} ) self.assertEqual(dummy_files['''torch'''] , snake_case )
240
0
'''simple docstring'''
import numpy as np
import qiskit


def bbaa(key_len: int = 8, seed: int | None = None) -> str:
    rng = np.random.default_rng(seed=seed)

    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bbaa_circ.x(index)
        if alice_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bbaa_circ.h(index)

    bbaa_circ.barrier()
    bbaa_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")

    return key


if __name__ == "__main__":
    print(f"""The generated key is : {bbaa(8, seed=0)}""")
    from doctest import testmod

    testmod()
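# For intuition, the key-sifting step above can be reproduced without a quantum
# simulator: bits survive only where Alice's and Bob's random bases happen to
# agree. A minimal NumPy-only sketch (all values synthetic, no qiskit involved):
import numpy as np

rng = np.random.default_rng(seed=0)
alice_basis = rng.integers(2, size=12)  # 0 = computational basis, 1 = Hadamard basis
bob_basis = rng.integers(2, size=12)
measured_bits = rng.integers(2, size=12)  # stand-in for the circuit's measurement results

# Keep only positions where the two bases agree -- on average half of them.
sifted = "".join(
    str(bit) for a, b, bit in zip(alice_basis, bob_basis, measured_bits) if a == b
)
print(f"kept {len(sifted)} of 12 raw bits: {sifted}")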
190
'''simple docstring'''
from __future__ import annotations

from typing import TypedDict


class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")

    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int."
        )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than"
            " len(bwt_string)."
        )

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
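# A quick round trip with the functions above; the expected values follow from
# the sorted rotation table of "banana":
result = bwt_transform("banana")
print(result)  # {'bwt_string': 'nnbaaa', 'idx_original_string': 3}
print(reverse_bwt(result["bwt_string"], result["idx_original_string"]))  # banana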
276
0
'''simple docstring''' import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _UpperCAmelCase ( lowerCAmelCase_ , unittest.TestCase ): a : Union[str, Any] =LEDTokenizer a : Dict =LEDTokenizerFast a : Union[str, Any] =True def lowerCamelCase__ ( self ): '''simple docstring''' super().setUp() __lowerCAmelCase = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] __lowerCAmelCase = dict(zip(__SCREAMING_SNAKE_CASE,range(len(__SCREAMING_SNAKE_CASE ) ) ) ) __lowerCAmelCase = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] __lowerCAmelCase = {"""unk_token""": """<unk>"""} __lowerCAmelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["""vocab_file"""] ) __lowerCAmelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file,"""w""",encoding="""utf-8""" ) as fp: fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + """\n""" ) with open(self.merges_file,"""w""",encoding="""utf-8""" ) as fp: fp.write("""\n""".join(__SCREAMING_SNAKE_CASE ) ) def lowerCamelCase__ ( self,**__SCREAMING_SNAKE_CASE ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname,**__SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self,**__SCREAMING_SNAKE_CASE ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname,**__SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ): '''simple docstring''' return "lower newer", "lower newer" @cached_property def lowerCamelCase__ ( self ): '''simple docstring''' return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" ) @cached_property def lowerCamelCase__ ( self ): '''simple docstring''' return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" ) @require_torch def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] __lowerCAmelCase = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __lowerCAmelCase = tokenizer(__SCREAMING_SNAKE_CASE,max_length=len(__SCREAMING_SNAKE_CASE ),padding=__SCREAMING_SNAKE_CASE,return_tensors="""pt""" ) self.assertIsInstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) self.assertEqual((2, 9),batch.input_ids.shape ) self.assertEqual((2, 9),batch.attention_mask.shape ) __lowerCAmelCase = batch.input_ids.tolist()[0] self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) @require_torch def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __lowerCAmelCase = tokenizer(__SCREAMING_SNAKE_CASE,padding=__SCREAMING_SNAKE_CASE,return_tensors="""pt""" ) self.assertIn("""input_ids""",__SCREAMING_SNAKE_CASE ) 
self.assertIn("""attention_mask""",__SCREAMING_SNAKE_CASE ) self.assertNotIn("""labels""",__SCREAMING_SNAKE_CASE ) self.assertNotIn("""decoder_attention_mask""",__SCREAMING_SNAKE_CASE ) @require_torch def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = [ """Summary of the text.""", """Another summary.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __lowerCAmelCase = tokenizer(text_target=__SCREAMING_SNAKE_CASE,max_length=32,padding="""max_length""",return_tensors="""pt""" ) self.assertEqual(32,targets["""input_ids"""].shape[1] ) @require_torch def lowerCamelCase__ ( self ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __lowerCAmelCase = tokenizer( ["""I am a small frog""" * 10_24, """I am a small frog"""],padding=__SCREAMING_SNAKE_CASE,truncation=__SCREAMING_SNAKE_CASE,return_tensors="""pt""" ) self.assertIsInstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) self.assertEqual(batch.input_ids.shape,(2, 51_22) ) @require_torch def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = ["""A long paragraph for summarization."""] __lowerCAmelCase = [ """Summary of the text.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __lowerCAmelCase = tokenizer(__SCREAMING_SNAKE_CASE,return_tensors="""pt""" ) __lowerCAmelCase = tokenizer(text_target=__SCREAMING_SNAKE_CASE,return_tensors="""pt""" ) __lowerCAmelCase = inputs["""input_ids"""] __lowerCAmelCase = targets["""input_ids"""] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def lowerCamelCase__ ( self ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __lowerCAmelCase = ["""Summary of the text.""", """Another summary."""] __lowerCAmelCase = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] __lowerCAmelCase = tokenizer(__SCREAMING_SNAKE_CASE,padding=__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = [[0] * len(__SCREAMING_SNAKE_CASE ) for x in encoded_output["""input_ids"""]] __lowerCAmelCase = tokenizer.pad(__SCREAMING_SNAKE_CASE ) self.assertSequenceEqual(outputs["""global_attention_mask"""],__SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self ): '''simple docstring''' pass def lowerCamelCase__ ( self ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ): __lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = """A, <mask> AllenNLP sentence.""" __lowerCAmelCase = tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE,return_token_type_ids=__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE,return_token_type_ids=__SCREAMING_SNAKE_CASE ) self.assertEqual(sum(tokens_r["""token_type_ids"""] ),sum(tokens_p["""token_type_ids"""] ) ) self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ),sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ),) 
__lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) __lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) self.assertSequenceEqual(tokens_p["""input_ids"""],[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""],[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( __SCREAMING_SNAKE_CASE,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( __SCREAMING_SNAKE_CASE,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
363
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaInpaintPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _UpperCAmelCase ( lowerCAmelCase_ , unittest.TestCase ): a : str =KandinskyVaaInpaintPipeline a : int =["""image_embeds""", """negative_image_embeds""", """image""", """mask_image"""] a : str =[ """image_embeds""", """negative_image_embeds""", """image""", """mask_image""", ] a : Optional[int] =[ """generator""", """height""", """width""", """latents""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] a : Dict =False @property def lowerCamelCase__ ( self ): '''simple docstring''' return 32 @property def lowerCamelCase__ ( self ): '''simple docstring''' return 32 @property def lowerCamelCase__ ( self ): '''simple docstring''' return self.time_input_dim @property def lowerCamelCase__ ( self ): '''simple docstring''' return self.time_input_dim * 4 @property def lowerCamelCase__ ( self ): '''simple docstring''' return 1_00 @property def lowerCamelCase__ ( self ): '''simple docstring''' torch.manual_seed(0 ) __lowerCAmelCase = { """in_channels""": 9, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } __lowerCAmelCase = UNetaDConditionModel(**__SCREAMING_SNAKE_CASE ) return model @property def lowerCamelCase__ ( self ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def lowerCamelCase__ ( self ): '''simple docstring''' torch.manual_seed(0 ) __lowerCAmelCase = VQModel(**self.dummy_movq_kwargs ) return model def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = self.dummy_unet __lowerCAmelCase = self.dummy_movq __lowerCAmelCase = DDIMScheduler( num_train_timesteps=10_00,beta_schedule="""linear""",beta_start=0.0_0085,beta_end=0.012,clip_sample=__SCREAMING_SNAKE_CASE,set_alpha_to_one=__SCREAMING_SNAKE_CASE,steps_offset=1,prediction_type="""epsilon""",thresholding=__SCREAMING_SNAKE_CASE,) __lowerCAmelCase = { """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def lowerCamelCase__ ( 
self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=0 ): '''simple docstring''' __lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size),rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size),rng=random.Random(seed + 1 ) ).to( __SCREAMING_SNAKE_CASE ) # create init_image __lowerCAmelCase = floats_tensor((1, 3, 64, 64),rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = image.cpu().permute(0,2,3,1 )[0] __lowerCAmelCase = Image.fromarray(np.uinta(__SCREAMING_SNAKE_CASE ) ).convert("""RGB""" ).resize((2_56, 2_56) ) # create mask __lowerCAmelCase = np.ones((64, 64),dtype=np.floataa ) __lowerCAmelCase = 0 if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ): __lowerCAmelCase = torch.manual_seed(__SCREAMING_SNAKE_CASE ) else: __lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = { """image""": init_image, """mask_image""": mask, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 2, """guidance_scale""": 4.0, """output_type""": """np""", } return inputs def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = """cpu""" __lowerCAmelCase = self.get_dummy_components() __lowerCAmelCase = self.pipeline_class(**__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = pipe(**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) ) __lowerCAmelCase = output.images __lowerCAmelCase = pipe( **self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ),return_dict=__SCREAMING_SNAKE_CASE,)[0] __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1] print(f'image.shape {image.shape}' ) assert image.shape == (1, 64, 64, 3) __lowerCAmelCase = np.array( [0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}' def lowerCamelCase__ ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): def lowerCamelCase__ ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" ) __lowerCAmelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) __lowerCAmelCase = np.ones((7_68, 7_68),dtype=np.floataa ) __lowerCAmelCase = 0 __lowerCAmelCase = """a hat""" __lowerCAmelCase = KandinskyVaaPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-prior""",torch_dtype=torch.floataa ) pipe_prior.to(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = KandinskyVaaInpaintPipeline.from_pretrained( 
"""kandinsky-community/kandinsky-2-2-decoder-inpaint""",torch_dtype=torch.floataa ) __lowerCAmelCase = pipeline.to(__SCREAMING_SNAKE_CASE ) pipeline.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) __lowerCAmelCase , __lowerCAmelCase = pipe_prior( __SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=5,negative_prompt="""""",).to_tuple() __lowerCAmelCase = pipeline( image=__SCREAMING_SNAKE_CASE,mask_image=__SCREAMING_SNAKE_CASE,image_embeds=__SCREAMING_SNAKE_CASE,negative_image_embeds=__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=1_00,height=7_68,width=7_68,output_type="""np""",) __lowerCAmelCase = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
46
0
"""simple docstring""" import importlib.util import json import os import warnings from dataclasses import dataclass, field import torch from ..training_args import TrainingArguments from ..utils import cached_property, is_sagemaker_dp_enabled, logging A : Optional[Any] = logging.get_logger(__name__) def _lowerCamelCase ( ): '''simple docstring''' __lowerCAmelCase = os.getenv("SM_HP_MP_PARAMETERS" , "{}" ) try: # Parse it and check the field "partitions" is included, it is required for model parallel. __lowerCAmelCase = json.loads(_UpperCamelCase ) if "partitions" not in smp_options: return False except json.JSONDecodeError: return False # Get the sagemaker specific framework parameters from mpi_options variable. __lowerCAmelCase = os.getenv("SM_FRAMEWORK_PARAMS" , "{}" ) try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". __lowerCAmelCase = json.loads(_UpperCamelCase ) if not mpi_options.get("sagemaker_mpi_enabled" , _UpperCamelCase ): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return importlib.util.find_spec("smdistributed" ) is not None if is_sagemaker_model_parallel_available(): import smdistributed.modelparallel.torch as smp smp.init() @dataclass class _UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' __UpperCAmelCase : str =field( default="""""" ,metadata={"""help""": """Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"""} ,) def snake_case ( self ): super().__post_init__() warnings.warn( "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use " "`TrainingArguments` instead." , __a , ) @cached_property def snake_case ( self ): logger.info("PyTorch: setting up devices" ) if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1: logger.warning( "torch.distributed process group is initialized, but local_rank == -1. " "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch" ) if self.no_cuda: __lowerCAmelCase = torch.device("cpu" ) __lowerCAmelCase = 0 elif is_sagemaker_model_parallel_available(): __lowerCAmelCase = smp.local_rank() __lowerCAmelCase = torch.device("cuda" , __a ) __lowerCAmelCase = 1 elif is_sagemaker_dp_enabled(): import smdistributed.dataparallel.torch.torch_smddp # noqa: F401 torch.distributed.init_process_group(backend="smddp" , timeout=self.ddp_timeout_delta ) __lowerCAmelCase = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK" ) ) __lowerCAmelCase = torch.device("cuda" , self.local_rank ) __lowerCAmelCase = 1 elif self.local_rank == -1: # if n_gpu is > 1 we'll use nn.DataParallel. # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0` # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will # trigger an error that a device index is missing. Index 0 takes into account the # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0` # will use the first GPU in that env, i.e. GPU#1 __lowerCAmelCase = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" ) # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at # the default value. __lowerCAmelCase = torch.cuda.device_count() else: # Here, we'll use torch.distributed. 
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend="nccl" , timeout=self.ddp_timeout_delta ) __lowerCAmelCase = torch.device("cuda" , self.local_rank ) __lowerCAmelCase = 1 if device.type == "cuda": torch.cuda.set_device(__a ) return device @property def snake_case ( self ): if is_sagemaker_model_parallel_available(): return smp.dp_size() return super().world_size @property def snake_case ( self ): return not is_sagemaker_model_parallel_available() @property def snake_case ( self ): return False
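# The availability probe at the top of this file decides everything from two
# JSON environment variables; a standalone illustration of that logic
# (environment values assumed for the example):
import json
import os

os.environ["SM_HP_MP_PARAMETERS"] = json.dumps({"partitions": 2})
os.environ["SM_FRAMEWORK_PARAMS"] = json.dumps({"sagemaker_mpi_enabled": True})

smp_options = json.loads(os.getenv("SM_HP_MP_PARAMETERS", "{}"))
mpi_options = json.loads(os.getenv("SM_FRAMEWORK_PARAMS", "{}"))
# Both conditions hold here; the real helper additionally requires the
# `smdistributed` package to be importable before it returns True.
print("partitions" in smp_options and mpi_options.get("sagemaker_mpi_enabled", False))  # True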
57
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import torch from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available @dataclass class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : Union[List[np.ndarray], torch.FloatTensor] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401 from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
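# A loading sketch for the pipelines re-exported above (model id assumed; the
# calls are left commented out because they download multi-GB weights):
# import torch
# from diffusers import TextToVideoSDPipeline
#
# pipe = TextToVideoSDPipeline.from_pretrained(
#     "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16
# ).to("cuda")
# frames = pipe("a panda surfing", num_inference_steps=25).frames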
346
0
"""simple docstring""" import logging from transformers import PretrainedConfig lowerCAmelCase_ : List[Any] = logging.getLogger(__name__) lowerCAmelCase_ : int = { '''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''', } class UpperCamelCase_ ( lowerCamelCase_ ): _A : Tuple = """bertabs""" def __init__( self , snake_case__=3_05_22 , snake_case__=5_12 , snake_case__=6 , snake_case__=5_12 , snake_case__=8 , snake_case__=5_12 , snake_case__=0.2 , snake_case__=6 , snake_case__=7_68 , snake_case__=8 , snake_case__=20_48 , snake_case__=0.2 , **snake_case__ , ) -> Any: """simple docstring""" super().__init__(**_UpperCAmelCase ) UpperCAmelCase = vocab_size UpperCAmelCase = max_pos UpperCAmelCase = enc_layers UpperCAmelCase = enc_hidden_size UpperCAmelCase = enc_heads UpperCAmelCase = enc_ff_size UpperCAmelCase = enc_dropout UpperCAmelCase = dec_layers UpperCAmelCase = dec_hidden_size UpperCAmelCase = dec_heads UpperCAmelCase = dec_ff_size UpperCAmelCase = dec_dropout
367
"""simple docstring""" import os def _lowerCAmelCase ( lowerCAmelCase ): '''simple docstring''' UpperCAmelCase = len(grid[0] ) UpperCAmelCase = len(lowerCAmelCase ) UpperCAmelCase = 0 UpperCAmelCase = 0 UpperCAmelCase = 0 # Check vertically, horizontally, diagonally at the same time (only works # for nxn grid) for i in range(lowerCAmelCase ): for j in range(n_rows - 3 ): UpperCAmelCase = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i] UpperCAmelCase = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3] # Left-to-right diagonal (\) product if i < n_columns - 3: UpperCAmelCase = ( grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3] ) # Right-to-left diagonal(/) product if i > 2: UpperCAmelCase = ( grid[i][j] * grid[i - 1][j + 1] * grid[i - 2][j + 2] * grid[i - 3][j + 3] ) UpperCAmelCase = max( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) if max_product > largest: UpperCAmelCase = max_product return largest def _lowerCAmelCase ( ): '''simple docstring''' UpperCAmelCase = [] with open(os.path.dirname(lowerCAmelCase ) + """/grid.txt""" ) as file: for line in file: grid.append(line.strip("""\n""" ).split(""" """ ) ) UpperCAmelCase = [[int(lowerCAmelCase ) for i in grid[j]] for j in range(len(lowerCAmelCase ) )] return largest_product(lowerCAmelCase ) if __name__ == "__main__": print(solution())
248
0
from typing import Optional

from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
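# The reader above is the class behind load_dataset("text", ...) in `datasets`;
# a usage sketch (file path assumed):
from datasets import load_dataset

# Each line of the file becomes one example with a single "text" column.
ds = load_dataset("text", data_files={"train": "my_corpus.txt"})
print(ds["train"][0])  # {'text': '<first line of my_corpus.txt>'}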
88
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys __lowerCAmelCase : int = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8') __lowerCAmelCase : Any = ( subprocess.check_output(F'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode('utf-8').split() ) __lowerCAmelCase : str = '|'.join(sys.argv[1:]) __lowerCAmelCase : Tuple = re.compile(RF'''^({joined_dirs}).*?\.py$''') __lowerCAmelCase : Union[str, Any] = [x for x in modified_files if regex.match(x)] print(' '.join(relevant_modified_files), end='')
88
1
"""simple docstring""" from __future__ import annotations def lowerCamelCase ( _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float , ) -> tuple[str, float]: '''simple docstring''' if (stress, tangential_force, area).count(0 ) != 1: raise ValueError("""You cannot supply more or less than 2 values""" ) elif stress < 0: raise ValueError("""Stress cannot be negative""" ) elif tangential_force < 0: raise ValueError("""Tangential Force cannot be negative""" ) elif area < 0: raise ValueError("""Area cannot be negative""" ) elif stress == 0: return ( "stress", tangential_force / area, ) elif tangential_force == 0: return ( "tangential_force", stress * area, ) else: return ( "area", tangential_force / stress, ) if __name__ == "__main__": import doctest doctest.testmod()
320
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCAmelCase : List[str] = { 'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'], 'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Tuple = [ 'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'AdaptiveEmbedding', 'TransfoXLForSequenceClassification', 'TransfoXLLMHeadModel', 'TransfoXLModel', 'TransfoXLPreTrainedModel', 'load_tf_weights_in_transfo_xl', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Dict = [ 'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFAdaptiveEmbedding', 'TFTransfoXLForSequenceClassification', 'TFTransfoXLLMHeadModel', 'TFTransfoXLMainLayer', 'TFTransfoXLModel', 'TFTransfoXLPreTrainedModel', ] if TYPE_CHECKING: from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_transfo_xl import ( TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, AdaptiveEmbedding, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel, TransfoXLPreTrainedModel, load_tf_weights_in_transfo_xl, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_transfo_xl import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFAdaptiveEmbedding, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLMainLayer, TFTransfoXLModel, TFTransfoXLPreTrainedModel, ) else: import sys UpperCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
1
"""simple docstring""" import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _UpperCamelCase ( lowerCAmelCase__ ,unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : List[str] ="""ssube/stable-diffusion-x4-upscaler-onnx""" def snake_case ( self , __a=0 ): __lowerCAmelCase = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(__a ) ) __lowerCAmelCase = torch.manual_seed(__a ) __lowerCAmelCase = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def snake_case ( self ): __lowerCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=__a ) __lowerCAmelCase = self.get_dummy_inputs() __lowerCAmelCase = pipe(**__a ).images __lowerCAmelCase = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 5_12, 5_12, 3) __lowerCAmelCase = np.array( [0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def snake_case ( self ): __lowerCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) __lowerCAmelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__a ) pipe.set_progress_bar_config(disable=__a ) __lowerCAmelCase = self.get_dummy_inputs() __lowerCAmelCase = pipe(**__a ).images __lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) __lowerCAmelCase = np.array( [0.6_8_9_8_8_9_2, 0.5_9_2_4_0_5_5_6, 0.5_2_4_9_9_5_2_7, 0.5_8_8_6_6_2_1_5, 0.5_2_2_5_8_2_3_5, 0.5_2_5_7_2_7_1_5, 0.6_2_4_1_4_4_7_3, 0.6_1_7_4_3_8_7, 0.6_2_1_4_9_6_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def snake_case ( self ): __lowerCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) __lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__a ) __lowerCAmelCase = self.get_dummy_inputs() __lowerCAmelCase = pipe(**__a ).images __lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) __lowerCAmelCase = np.array( [0.7_6_5_9_2_7_8, 0.7_6_4_3_7_6_6_4, 0.7_5_5_7_9_1_0_7, 0.7_6_9_1_1_1_6, 0.7_7_6_6_6_9_8_6, 0.7_7_2_7_6_7_2, 0.7_7_5_8_6_6_4, 0.7_8_1_2_2_2_6, 0.7_6_9_4_2_5_1_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def snake_case ( self ): __lowerCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) __lowerCAmelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__a ) __lowerCAmelCase = self.get_dummy_inputs() __lowerCAmelCase = pipe(**__a ).images __lowerCAmelCase = image[0, -3:, -3:, -1] assert 
image.shape == (1, 5_12, 5_12, 3) __lowerCAmelCase = np.array( [0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def snake_case ( self ): __lowerCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) __lowerCAmelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__a ) __lowerCAmelCase = self.get_dummy_inputs() __lowerCAmelCase = pipe(**__a ).images __lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) __lowerCAmelCase = np.array( [0.7_7_4_2_4_4_9_6, 0.7_7_3_6_0_1, 0.7_6_4_5_2_8_8, 0.7_7_6_9_5_9_8, 0.7_7_7_2_7_3_9, 0.7_7_3_8_6_8_8, 0.7_8_1_8_7_2_3_3, 0.7_7_8_7_9_5_8_4, 0.7_6_7_0_4_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' @property def snake_case ( self ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def snake_case ( self ): __lowerCAmelCase = ort.SessionOptions() __lowerCAmelCase = False return options def snake_case ( self ): __lowerCAmelCase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) __lowerCAmelCase = init_image.resize((1_28, 1_28) ) # using the PNDM scheduler by default __lowerCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx" , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__a ) __lowerCAmelCase = "A fantasy landscape, trending on artstation" __lowerCAmelCase = torch.manual_seed(0 ) __lowerCAmelCase = pipe( prompt=__a , image=__a , guidance_scale=7.5 , num_inference_steps=10 , generator=__a , output_type="np" , ) __lowerCAmelCase = output.images __lowerCAmelCase = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 5_12, 3) __lowerCAmelCase = np.array([0.4_8_8_3, 0.4_9_4_7, 0.4_9_8_0, 0.4_9_7_5, 0.4_9_8_2, 0.4_9_8_0, 0.5_0_0_0, 0.5_0_0_6, 0.4_9_7_2] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def snake_case ( self ): __lowerCAmelCase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) __lowerCAmelCase = init_image.resize((1_28, 1_28) ) __lowerCAmelCase = LMSDiscreteScheduler.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx" , subfolder="scheduler" ) __lowerCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx" , scheduler=__a , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__a ) __lowerCAmelCase = "A fantasy landscape, trending on artstation" __lowerCAmelCase = torch.manual_seed(0 ) __lowerCAmelCase = pipe( prompt=__a , image=__a , guidance_scale=7.5 , num_inference_steps=20 , generator=__a , output_type="np" , ) __lowerCAmelCase = output.images __lowerCAmelCase = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 5_12, 3) __lowerCAmelCase = np.array( [0.5_0_1_7_3_7_5_3, 
0.5_0_2_2_3_3_5_6, 0.5_0_2_0_3_9, 0.5_0_2_3_3_0_3_6, 0.5_0_2_3_7_2_5, 0.5_0_2_2_6_0_1, 0.5_0_1_8_7_5_8, 0.5_0_2_3_4_0_8_5, 0.5_0_2_4_1_5_6_6] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
57
_lowerCAmelCase : Optional[int] = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n" _lowerCAmelCase : Tuple = [{"type": "code", "content": INSTALL_CONTENT}] _lowerCAmelCase : Optional[int] = { "{processor_class}": "FakeProcessorClass", "{model_class}": "FakeModelClass", "{object_class}": "FakeObjectClass", }
218
0
"""simple docstring""" import absl # noqa: F401 # Here to have a nice missing dependency error message early on import nltk # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import six # noqa: F401 # Here to have a nice missing dependency error message early on from rouge_score import rouge_scorer, scoring import datasets lowerCAmelCase_ = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n' lowerCAmelCase_ = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n' lowerCAmelCase_ = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. 
`"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class __A ( datasets.Metric ): '''simple docstring''' def UpperCAmelCase ( self : Dict ) -> Optional[int]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { '''predictions''': datasets.Value('''string''' ,id='''sequence''' ), '''references''': datasets.Value('''string''' ,id='''sequence''' ), } ) ,codebase_urls=['''https://github.com/google-research/google-research/tree/master/rouge'''] ,reference_urls=[ '''https://en.wikipedia.org/wiki/ROUGE_(metric)''', '''https://github.com/google-research/google-research/tree/master/rouge''', ] ,) def UpperCAmelCase ( self : List[str] ,_snake_case : Optional[Any] ,_snake_case : Optional[int] ,_snake_case : int=None ,_snake_case : Optional[Any]=True ,_snake_case : Tuple=False ) -> List[str]: """simple docstring""" if rouge_types is None: lowercase__ : Tuple = ['''rouge1''', '''rouge2''', '''rougeL''', '''rougeLsum'''] lowercase__ : str = rouge_scorer.RougeScorer(rouge_types=_snake_case ,use_stemmer=_snake_case ) if use_aggregator: lowercase__ : Optional[int] = scoring.BootstrapAggregator() else: lowercase__ : Any = [] for ref, pred in zip(_snake_case ,_snake_case ): lowercase__ : str = scorer.score(_snake_case ,_snake_case ) if use_aggregator: aggregator.add_scores(_snake_case ) else: scores.append(_snake_case ) if use_aggregator: lowercase__ : Optional[int] = aggregator.aggregate() else: lowercase__ : Dict = {} for key in scores[0]: lowercase__ : Optional[int] = [score[key] for score in scores] return result
302
"""simple docstring""" from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_regnet import RegNetConfig lowerCAmelCase_ = logging.get_logger(__name__) # General docstring lowerCAmelCase_ = 'RegNetConfig' # Base docstring lowerCAmelCase_ = 'facebook/regnet-y-040' lowerCAmelCase_ = [1, 1_088, 7, 7] # Image classification docstring lowerCAmelCase_ = 'facebook/regnet-y-040' lowerCAmelCase_ = 'tabby, tabby cat' lowerCAmelCase_ = [ 'facebook/regnet-y-040', # See all regnet models at https://huggingface.co/models?filter=regnet ] class __A ( nn.Module ): '''simple docstring''' def __init__( self : int ,_snake_case : int ,_snake_case : int ,_snake_case : int = 3 ,_snake_case : int = 1 ,_snake_case : int = 1 ,_snake_case : Optional[str] = "relu" ,) -> Union[str, Any]: """simple docstring""" super().__init__() lowercase__ : Tuple = nn.Convad( _snake_case ,_snake_case ,kernel_size=_snake_case ,stride=_snake_case ,padding=kernel_size // 2 ,groups=_snake_case ,bias=_snake_case ,) lowercase__ : List[Any] = nn.BatchNormad(_snake_case ) lowercase__ : Optional[int] = ACTaFN[activation] if activation is not None else nn.Identity() def UpperCAmelCase ( self : List[str] ,_snake_case : Optional[int] ) -> Optional[Any]: """simple docstring""" lowercase__ : Optional[Any] = self.convolution(_snake_case ) lowercase__ : Tuple = self.normalization(_snake_case ) lowercase__ : Tuple = self.activation(_snake_case ) return hidden_state class __A ( nn.Module ): '''simple docstring''' def __init__( self : Optional[int] ,_snake_case : RegNetConfig ) -> Optional[Any]: """simple docstring""" super().__init__() lowercase__ : List[Any] = RegNetConvLayer( config.num_channels ,config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act ) lowercase__ : str = config.num_channels def UpperCAmelCase ( self : int ,_snake_case : Dict ) -> str: """simple docstring""" lowercase__ : Union[str, Any] = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' ) lowercase__ : Optional[int] = self.embedder(_snake_case ) return hidden_state class __A ( nn.Module ): '''simple docstring''' def __init__( self : str ,_snake_case : int ,_snake_case : int ,_snake_case : int = 2 ) -> Any: """simple docstring""" super().__init__() lowercase__ : List[str] = nn.Convad(_snake_case ,_snake_case ,kernel_size=1 ,stride=_snake_case ,bias=_snake_case ) lowercase__ : Any = nn.BatchNormad(_snake_case ) def UpperCAmelCase ( self : List[str] ,_snake_case : Tensor ) -> Tensor: """simple docstring""" lowercase__ : Union[str, Any] = self.convolution(_snake_case ) lowercase__ : Optional[int] = self.normalization(_snake_case ) return hidden_state class __A ( nn.Module ): '''simple docstring''' def __init__( self : Tuple ,_snake_case : int ,_snake_case : int ) -> Dict: """simple docstring""" super().__init__() lowercase__ : Any = nn.AdaptiveAvgPoolad((1, 1) ) lowercase__ : Dict = nn.Sequential( nn.Convad(_snake_case ,_snake_case 
,kernel_size=1 ) ,nn.ReLU() ,nn.Convad(_snake_case ,_snake_case ,kernel_size=1 ) ,nn.Sigmoid() ,) def UpperCAmelCase ( self : int ,_snake_case : List[Any] ) -> Optional[Any]: """simple docstring""" lowercase__ : List[str] = self.pooler(_snake_case ) lowercase__ : Union[str, Any] = self.attention(_snake_case ) lowercase__ : List[str] = hidden_state * attention return hidden_state class __A ( nn.Module ): '''simple docstring''' def __init__( self : List[str] ,_snake_case : RegNetConfig ,_snake_case : int ,_snake_case : int ,_snake_case : int = 1 ) -> List[str]: """simple docstring""" super().__init__() lowercase__ : Tuple = in_channels != out_channels or stride != 1 lowercase__ : Optional[int] = max(1 ,out_channels // config.groups_width ) lowercase__ : str = ( RegNetShortCut(_snake_case ,_snake_case ,stride=_snake_case ) if should_apply_shortcut else nn.Identity() ) lowercase__ : Optional[int] = nn.Sequential( RegNetConvLayer(_snake_case ,_snake_case ,kernel_size=1 ,activation=config.hidden_act ) ,RegNetConvLayer(_snake_case ,_snake_case ,stride=_snake_case ,groups=_snake_case ,activation=config.hidden_act ) ,RegNetConvLayer(_snake_case ,_snake_case ,kernel_size=1 ,activation=_snake_case ) ,) lowercase__ : str = ACTaFN[config.hidden_act] def UpperCAmelCase ( self : Optional[Any] ,_snake_case : List[Any] ) -> List[str]: """simple docstring""" lowercase__ : Tuple = hidden_state lowercase__ : Union[str, Any] = self.layer(_snake_case ) lowercase__ : List[Any] = self.shortcut(_snake_case ) hidden_state += residual lowercase__ : Optional[int] = self.activation(_snake_case ) return hidden_state class __A ( nn.Module ): '''simple docstring''' def __init__( self : Tuple ,_snake_case : RegNetConfig ,_snake_case : int ,_snake_case : int ,_snake_case : int = 1 ) -> Optional[int]: """simple docstring""" super().__init__() lowercase__ : List[Any] = in_channels != out_channels or stride != 1 lowercase__ : List[str] = max(1 ,out_channels // config.groups_width ) lowercase__ : Tuple = ( RegNetShortCut(_snake_case ,_snake_case ,stride=_snake_case ) if should_apply_shortcut else nn.Identity() ) lowercase__ : str = nn.Sequential( RegNetConvLayer(_snake_case ,_snake_case ,kernel_size=1 ,activation=config.hidden_act ) ,RegNetConvLayer(_snake_case ,_snake_case ,stride=_snake_case ,groups=_snake_case ,activation=config.hidden_act ) ,RegNetSELayer(_snake_case ,reduced_channels=int(round(in_channels / 4 ) ) ) ,RegNetConvLayer(_snake_case ,_snake_case ,kernel_size=1 ,activation=_snake_case ) ,) lowercase__ : Optional[Any] = ACTaFN[config.hidden_act] def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Optional[int] ) -> Tuple: """simple docstring""" lowercase__ : str = hidden_state lowercase__ : Optional[Any] = self.layer(_snake_case ) lowercase__ : int = self.shortcut(_snake_case ) hidden_state += residual lowercase__ : str = self.activation(_snake_case ) return hidden_state class __A ( nn.Module ): '''simple docstring''' def __init__( self : Union[str, Any] ,_snake_case : RegNetConfig ,_snake_case : int ,_snake_case : int ,_snake_case : int = 2 ,_snake_case : int = 2 ,) -> Dict: """simple docstring""" super().__init__() lowercase__ : Optional[Any] = RegNetXLayer if config.layer_type == '''x''' else RegNetYLayer lowercase__ : Optional[Any] = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( _snake_case ,_snake_case ,_snake_case ,stride=_snake_case ,) ,*[layer(_snake_case ,_snake_case ,_snake_case ) for _ in range(depth - 1 )] ,) def UpperCAmelCase ( self : Tuple ,_snake_case : 
int ) -> List[Any]: """simple docstring""" lowercase__ : List[str] = self.layers(_snake_case ) return hidden_state class __A ( nn.Module ): '''simple docstring''' def __init__( self : Dict ,_snake_case : RegNetConfig ) -> List[Any]: """simple docstring""" super().__init__() lowercase__ : str = nn.ModuleList([] ) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( _snake_case ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,) ) lowercase__ : str = zip(config.hidden_sizes ,config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(_snake_case ,config.depths[1:] ): self.stages.append(RegNetStage(_snake_case ,_snake_case ,_snake_case ,depth=_snake_case ) ) def UpperCAmelCase ( self : List[str] ,_snake_case : Tensor ,_snake_case : bool = False ,_snake_case : bool = True ) -> BaseModelOutputWithNoAttention: """simple docstring""" lowercase__ : Dict = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: lowercase__ : int = hidden_states + (hidden_state,) lowercase__ : Any = stage_module(_snake_case ) if output_hidden_states: lowercase__ : Optional[int] = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=_snake_case ,hidden_states=_snake_case ) class __A ( A_ ): '''simple docstring''' lowerCAmelCase : int = RegNetConfig lowerCAmelCase : List[Any] = "regnet" lowerCAmelCase : Optional[int] = "pixel_values" lowerCAmelCase : Union[str, Any] = True def UpperCAmelCase ( self : Any ,_snake_case : Tuple ) -> List[Any]: """simple docstring""" if isinstance(_snake_case ,nn.Convad ): nn.init.kaiming_normal_(module.weight ,mode='''fan_out''' ,nonlinearity='''relu''' ) elif isinstance(_snake_case ,(nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight ,1 ) nn.init.constant_(module.bias ,0 ) def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Dict ,_snake_case : Any=False ) -> Optional[int]: """simple docstring""" if isinstance(_snake_case ,_snake_case ): lowercase__ : str = value lowerCAmelCase_ = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' lowerCAmelCase_ = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." 
,A_ ,) # Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet class __A ( A_ ): '''simple docstring''' def __init__( self : Optional[Any] ,_snake_case : Any ) -> Tuple: """simple docstring""" super().__init__(_snake_case ) lowercase__ : Any = config lowercase__ : List[str] = RegNetEmbeddings(_snake_case ) lowercase__ : Any = RegNetEncoder(_snake_case ) lowercase__ : Dict = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(_snake_case ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC ,output_type=_snake_case ,config_class=_CONFIG_FOR_DOC ,modality='''vision''' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,) def UpperCAmelCase ( self : Dict ,_snake_case : Tensor ,_snake_case : Optional[bool] = None ,_snake_case : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention: """simple docstring""" lowercase__ : List[Any] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase__ : Dict = return_dict if return_dict is not None else self.config.use_return_dict lowercase__ : Union[str, Any] = self.embedder(_snake_case ) lowercase__ : List[Any] = self.encoder( _snake_case ,output_hidden_states=_snake_case ,return_dict=_snake_case ) lowercase__ : str = encoder_outputs[0] lowercase__ : Optional[int] = self.pooler(_snake_case ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=_snake_case ,pooler_output=_snake_case ,hidden_states=encoder_outputs.hidden_states ,) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " ,A_ ,) # Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet class __A ( A_ ): '''simple docstring''' def __init__( self : int ,_snake_case : Tuple ) -> Any: """simple docstring""" super().__init__(_snake_case ) lowercase__ : Optional[Any] = config.num_labels lowercase__ : int = RegNetModel(_snake_case ) # classification head lowercase__ : str = nn.Sequential( nn.Flatten() ,nn.Linear(config.hidden_sizes[-1] ,config.num_labels ) if config.num_labels > 0 else nn.Identity() ,) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(_snake_case ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=_snake_case ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,) def UpperCAmelCase ( self : List[Any] ,_snake_case : Optional[torch.FloatTensor] = None ,_snake_case : Optional[torch.LongTensor] = None ,_snake_case : Optional[bool] = None ,_snake_case : Optional[bool] = None ,) -> ImageClassifierOutputWithNoAttention: """simple docstring""" lowercase__ : Any = return_dict if return_dict is not None else self.config.use_return_dict lowercase__ : List[Any] = self.regnet(_snake_case ,output_hidden_states=_snake_case ,return_dict=_snake_case ) lowercase__ : List[str] = outputs.pooler_output if return_dict else outputs[1] lowercase__ : Union[str, Any] = self.classifier(_snake_case ) lowercase__ : Optional[int] = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: lowercase__ : List[Any] = '''regression''' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): lowercase__ : Dict = '''single_label_classification''' else: lowercase__ : Optional[int] = '''multi_label_classification''' if self.config.problem_type == "regression": lowercase__ : Union[str, Any] = MSELoss() if self.num_labels == 1: lowercase__ : List[Any] = loss_fct(logits.squeeze() ,labels.squeeze() ) else: lowercase__ : Tuple = loss_fct(_snake_case ,_snake_case ) elif self.config.problem_type == "single_label_classification": lowercase__ : Tuple = CrossEntropyLoss() lowercase__ : str = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": lowercase__ : Any = BCEWithLogitsLoss() lowercase__ : Union[str, Any] = loss_fct(_snake_case ,_snake_case ) if not return_dict: lowercase__ : Tuple = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=_snake_case ,logits=_snake_case ,hidden_states=outputs.hidden_states )
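The docstring constants at the top of this sample (checkpoint facebook/regnet-y-040, expected label "tabby, tabby cat", feature shape [1, 1088, 7, 7]) translate into the usage below; a minimal sketch that assumes the standard transformers export name RegNetForImageClassification for the classification model defined above, with a purely illustrative image path:

import torch
from PIL import Image
from transformers import AutoImageProcessor, RegNetForImageClassification

image = Image.open("cat.jpg")  # placeholder path; any RGB image works
processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# per the docstring constants above, a cat photo should decode to "tabby, tabby cat"
print(model.config.id2label[logits.argmax(-1).item()])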
302
1
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    # sort by the supplied key (e.g. value/weight density) and take items
    # greedily while they still fit under max_cost
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
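A quick usage sketch for the helpers above (the menu data is invented for illustration):

food = ["burger", "salad", "steak"]
value = [80, 40, 100]
weight = [4, 3, 8]

menu = build_menu(food, value, weight)
chosen, total_value = greedy(menu, 10, Things.value_weight)
print(chosen, total_value)  # [Things(burger, 80, 4), Things(salad, 40, 3)] 120.0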
207
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available


_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]


if TYPE_CHECKING:
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor

else:
    import sys

    # replace this module with a lazy proxy so submodules are only imported on first use
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
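For intuition, a stripped-down stand-in for the lazy-import mechanism used here (illustrative only; the real _LazyModule in transformers does considerably more):

import importlib
import types


class TinyLazyModule(types.ModuleType):
    """Defer importing submodules until one of their symbols is accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            sym: mod for mod, syms in import_structure.items() for sym in syms
        }

    def __getattr__(self, attr):
        module_name = self._symbol_to_module[attr]
        module = importlib.import_module(f".{module_name}", self.__name__)
        return getattr(module, attr)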
92
0
"""simple docstring""" def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase ): '''simple docstring''' if number < 0 or shift_amount < 0: raise ValueError("""both inputs must be positive integers""" ) UpperCAmelCase = str(bin(lowerCAmelCase ) ) binary_number += "0" * shift_amount return binary_number def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase ): '''simple docstring''' if number < 0 or shift_amount < 0: raise ValueError("""both inputs must be positive integers""" ) UpperCAmelCase = str(bin(lowerCAmelCase ) )[2:] if shift_amount >= len(lowerCAmelCase ): return "0b0" UpperCAmelCase = binary_number[: len(lowerCAmelCase ) - shift_amount] return "0b" + shifted_binary_number def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase ): '''simple docstring''' if number >= 0: # Get binary representation of positive number UpperCAmelCase = """0""" + str(bin(lowerCAmelCase ) ).strip("""-""" )[2:] else: # Get binary (2's complement) representation of negative number UpperCAmelCase = len(bin(lowerCAmelCase )[3:] ) # Find 2's complement of number UpperCAmelCase = bin(abs(lowerCAmelCase ) - (1 << binary_number_length) )[3:] UpperCAmelCase = ( """1""" + """0""" * (binary_number_length - len(lowerCAmelCase )) + binary_number ) if shift_amount >= len(lowerCAmelCase ): return "0b" + binary_number[0] * len(lowerCAmelCase ) return ( "0b" + binary_number[0] * shift_amount + binary_number[: len(lowerCAmelCase ) - shift_amount] ) if __name__ == "__main__": import doctest doctest.testmod()
248
"""simple docstring""" import os def _lowerCAmelCase ( lowerCAmelCase ): '''simple docstring''' UpperCAmelCase = len(grid[0] ) UpperCAmelCase = len(lowerCAmelCase ) UpperCAmelCase = 0 UpperCAmelCase = 0 UpperCAmelCase = 0 # Check vertically, horizontally, diagonally at the same time (only works # for nxn grid) for i in range(lowerCAmelCase ): for j in range(n_rows - 3 ): UpperCAmelCase = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i] UpperCAmelCase = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3] # Left-to-right diagonal (\) product if i < n_columns - 3: UpperCAmelCase = ( grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3] ) # Right-to-left diagonal(/) product if i > 2: UpperCAmelCase = ( grid[i][j] * grid[i - 1][j + 1] * grid[i - 2][j + 2] * grid[i - 3][j + 3] ) UpperCAmelCase = max( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) if max_product > largest: UpperCAmelCase = max_product return largest def _lowerCAmelCase ( ): '''simple docstring''' UpperCAmelCase = [] with open(os.path.dirname(lowerCAmelCase ) + """/grid.txt""" ) as file: for line in file: grid.append(line.strip("""\n""" ).split(""" """ ) ) UpperCAmelCase = [[int(lowerCAmelCase ) for i in grid[j]] for j in range(len(lowerCAmelCase ) )] return largest_product(lowerCAmelCase ) if __name__ == "__main__": print(solution())
248
1
"""Longest path (counted in vertices) through a DAG via Kahn's topological sort."""


def longest_distance(graph):
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    # process vertices in topological order and relax distances along each edge
    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
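For the hard-coded graph this prints 5 (the longest chain is 0 -> 2 -> 5 -> 6 -> 7, counted in vertices). A smaller check:

# a simple chain 0 -> 1 -> 2 has a longest path of 3 vertices
longest_distance({0: [1], 1: [2], 2: []})  # prints 3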
251
'''simple docstring''' from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging UpperCamelCase_ = logging.get_logger(__name__) class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : str = ['''input_values''', '''padding_mask'''] def __init__( self, A = 1, A = 24_000, A = 0.0, A = None, A = None, **A, ): '''simple docstring''' super().__init__(feature_size=A, sampling_rate=A, padding_value=A, **A ) SCREAMING_SNAKE_CASE : Any = chunk_length_s SCREAMING_SNAKE_CASE : Dict = overlap @property def UpperCamelCase_ ( self ): '''simple docstring''' if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def UpperCamelCase_ ( self ): '''simple docstring''' if self.chunk_length_s is None or self.overlap is None: return None else: return max(1, int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self, A, A = None, A = False, A = None, A = None, A = None, ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F"The model corresponding to this feature extractor: {self} was trained using a sampling rate of" F" {self.sampling_rate}. Please make sure that the provided audio input was sampled with" F" {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) if padding and truncation: raise ValueError('Both padding and truncation were set. Make sure you only set one.' ) elif padding is None: # by default let's pad the inputs SCREAMING_SNAKE_CASE : Tuple = True SCREAMING_SNAKE_CASE : Optional[int] = bool( isinstance(A, (list, tuple) ) and (isinstance(raw_audio[0], (np.ndarray, tuple, list) )) ) if is_batched: SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(A, dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(A, np.ndarray ): SCREAMING_SNAKE_CASE : Optional[int] = np.asarray(A, dtype=np.floataa ) elif isinstance(A, np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): SCREAMING_SNAKE_CASE : Optional[Any] = raw_audio.astype(np.floataa ) # always return batch if not is_batched: SCREAMING_SNAKE_CASE : str = [np.asarray(A ).T] # verify inputs are valid for idx, example in enumerate(A ): if example.ndim > 2: raise ValueError(F"Expected input shape (channels, length) but got shape {example.shape}" ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(F"Expected mono audio but example has {example.shape[-1]} channels" ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(F"Expected stereo audio but example has {example.shape[-1]} channels" ) SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : List[str] = BatchFeature({'input_values': raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: SCREAMING_SNAKE_CASE : Optional[int] = min(array.shape[0] for array in raw_audio ) SCREAMING_SNAKE_CASE : Tuple = int(np.floor(max_length / self.chunk_stride ) ) SCREAMING_SNAKE_CASE : int = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: SCREAMING_SNAKE_CASE : str = max(array.shape[0] for array in raw_audio ) SCREAMING_SNAKE_CASE : Tuple = int(np.ceil(max_length / 
self.chunk_stride ) ) SCREAMING_SNAKE_CASE : Dict = (nb_step - 1) * self.chunk_stride + self.chunk_length SCREAMING_SNAKE_CASE : List[str] = 'max_length' else: SCREAMING_SNAKE_CASE : List[Any] = input_values # normal padding on batch if padded_inputs is None: SCREAMING_SNAKE_CASE : int = self.pad( A, max_length=A, truncation=A, padding=A, return_attention_mask=A, ) if padding: SCREAMING_SNAKE_CASE : Dict = padded_inputs.pop('attention_mask' ) SCREAMING_SNAKE_CASE : Optional[int] = [] for example in padded_inputs.pop('input_values' ): if self.feature_size == 1: SCREAMING_SNAKE_CASE : List[str] = example[..., None] input_values.append(example.T ) SCREAMING_SNAKE_CASE : Dict = input_values if return_tensors is not None: SCREAMING_SNAKE_CASE : int = padded_inputs.convert_to_tensors(A ) return padded_inputs
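A usage sketch for the extractor above; the obfuscated class _a matches what transformers ships as EncodecFeatureExtractor (an assumption based on the 24 kHz default and the padding_mask output), taking mono float audio:

import numpy as np
from transformers import EncodecFeatureExtractor

extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24_000)
raw_audio = np.zeros(24_000, dtype=np.float32)  # one second of mono silence

inputs = extractor(raw_audio, sampling_rate=24_000, return_tensors="pt")
print(inputs["input_values"].shape)  # torch.Size([1, 1, 24000]) -- (batch, channels, samples)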
251
1
import argparse
import shlex

import runhouse as rh


if __name__ == "__main__":
    # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
    # setup instructions, if using on-demand hardware
    # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
    # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
    # Throw an error if user passes both BYO and on-demand cluster args
    # Otherwise, use default values
    parser = argparse.ArgumentParser()
    parser.add_argument("--user", type=str, default="ubuntu")
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--key_path", type=str, default=None)
    parser.add_argument("--instance", type=str, default="V100:1")
    parser.add_argument("--provider", type=str, default="cheapest")
    parser.add_argument("--use_spot", type=bool, default=False)
    parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
    else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]

    # Set up remote environment
    cluster.install_packages(["pip:./"])  # Installs transformers from local source
    # Note transformers is copied into the home directory on the remote machine, so we can install from there
    cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
    cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])

    # Run example. You can bypass the CLI wrapper and paste your own code here.
    cluster.run([f"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])

    # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
    # from my_script... import train
    # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
    # launch_train_gpu = rh.function(fn=train,
    #                                system=gpu,
    #                                reqs=reqs,
    #                                name='train_bert_glue')
    #
    # We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
    #                  stream_logs=True)
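With the flags defined above, a typical launch looks like `python run_on_remote.py --instance V100:1 --provider aws pytorch/text-generation/run_generation.py --model_type=gpt2` (the script filename and the trailing generation arguments are illustrative). Anything the parser does not recognize lands in `unknown` via `parse_known_args` and is forwarded verbatim, shell-quoted, to the example script on the cluster.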
103
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class lowerCAmelCase_ ( a__ , unittest.TestCase ): UpperCAmelCase__ : Union[str, Any] = ShapEPipeline UpperCAmelCase__ : List[Any] = ["prompt"] UpperCAmelCase__ : List[str] = ["prompt"] UpperCAmelCase__ : int = [ "num_images_per_prompt", "num_inference_steps", "generator", "latents", "guidance_scale", "frame_size", "output_type", "return_dict", ] UpperCAmelCase__ : Union[str, Any] = False @property def snake_case_ ( self ) -> Union[str, Any]: return 32 @property def snake_case_ ( self ) -> List[str]: return 32 @property def snake_case_ ( self ) -> int: return self.time_input_dim * 4 @property def snake_case_ ( self ) -> Optional[int]: return 8 @property def snake_case_ ( self ) -> str: UpperCamelCase : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def snake_case_ ( self ) -> Optional[Any]: torch.manual_seed(0 ) UpperCamelCase : Dict = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE_ ) @property def snake_case_ ( self ) -> Dict: torch.manual_seed(0 ) UpperCamelCase : int = { 'num_attention_heads': 2, 'attention_head_dim': 16, 'embedding_dim': self.time_input_dim, 'num_embeddings': 32, 'embedding_proj_dim': self.text_embedder_hidden_size, 'time_embed_dim': self.time_embed_dim, 'num_layers': 1, 'clip_embed_dim': self.time_input_dim * 2, 'additional_embeddings': 0, 'time_embed_act_fn': 'gelu', 'norm_in_type': 'layer', 'encoder_hid_proj_type': None, 'added_emb_type': None, } UpperCamelCase : Any = PriorTransformer(**SCREAMING_SNAKE_CASE_ ) return model @property def snake_case_ ( self ) -> Tuple: torch.manual_seed(0 ) UpperCamelCase : Any = { 'param_shapes': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), 'd_latent': self.time_input_dim, 'd_hidden': self.renderer_dim, 'n_output': 12, 'background': ( 0.1, 0.1, 0.1, ), } UpperCamelCase : Dict = ShapERenderer(**SCREAMING_SNAKE_CASE_ ) return model def snake_case_ ( self ) -> Tuple: UpperCamelCase : List[Any] = self.dummy_prior UpperCamelCase : int = self.dummy_text_encoder UpperCamelCase : Dict = self.dummy_tokenizer UpperCamelCase : List[str] = self.dummy_renderer UpperCamelCase : str = HeunDiscreteScheduler( beta_schedule='exp', num_train_timesteps=1024, prediction_type='sample', use_karras_sigmas=SCREAMING_SNAKE_CASE_, clip_sample=SCREAMING_SNAKE_CASE_, clip_sample_range=1.0, ) UpperCamelCase : List[str] = { 'prior': prior, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'renderer': renderer, 'scheduler': scheduler, } return components def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=0 ) -> Any: if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ): UpperCamelCase : Optional[int] = torch.manual_seed(SCREAMING_SNAKE_CASE_ ) else: UpperCamelCase : Any = 
torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : str = { 'prompt': 'horse', 'generator': generator, 'num_inference_steps': 1, 'frame_size': 32, 'output_type': 'np', } return inputs def snake_case_ ( self ) -> int: UpperCamelCase : str = 'cpu' UpperCamelCase : Optional[int] = self.get_dummy_components() UpperCamelCase : int = self.pipeline_class(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[Any] = pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : str = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) ) UpperCamelCase : Optional[int] = output.images[0] UpperCamelCase : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) UpperCamelCase : List[str] = np.array( [ 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def snake_case_ ( self ) -> int: # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def snake_case_ ( self ) -> str: UpperCamelCase : str = torch_device == 'cpu' UpperCamelCase : Optional[int] = True self._test_inference_batch_single_identical( batch_size=2, test_max_difference=SCREAMING_SNAKE_CASE_, relax_max_difference=SCREAMING_SNAKE_CASE_, ) def snake_case_ ( self ) -> List[Any]: UpperCamelCase : List[Any] = self.get_dummy_components() UpperCamelCase : str = self.pipeline_class(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[int] = pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[Any] = 1 UpperCamelCase : Union[str, Any] = 2 UpperCamelCase : Union[str, Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) for key in inputs.keys(): if key in self.batch_params: UpperCamelCase : List[Any] = batch_size * [inputs[key]] UpperCamelCase : int = pipe(**SCREAMING_SNAKE_CASE_, num_images_per_prompt=SCREAMING_SNAKE_CASE_ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class lowerCAmelCase_ ( unittest.TestCase ): def snake_case_ ( self ) -> str: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case_ ( self ) -> Tuple: UpperCamelCase : Optional[int] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/test_shap_e_np_out.npy' ) UpperCamelCase : Optional[int] = ShapEPipeline.from_pretrained('openai/shap-e' ) UpperCamelCase : Optional[int] = pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : str = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 ) UpperCamelCase : Dict = pipe( 'a shark', generator=SCREAMING_SNAKE_CASE_, guidance_scale=15.0, num_inference_steps=64, frame_size=64, output_type='np', ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
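The slow test above reduces to this stand-alone usage sketch (model id, prompt, and arguments are taken directly from the test; it needs a GPU and the openai/shap-e weights):

import torch
from diffusers import ShapEPipeline

pipe = ShapEPipeline.from_pretrained("openai/shap-e").to("cuda")
generator = torch.Generator(device="cuda").manual_seed(0)

images = pipe(
    "a shark",
    generator=generator,
    guidance_scale=15.0,
    num_inference_steps=64,
    frame_size=64,
    output_type="np",
).images[0]
print(images.shape)  # (20, 64, 64, 3) per the test's assertion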
103
1
import argparse
import json
from typing import List

from ltp import LTP

from transformers import BertTokenizer


def _is_chinese_char(cp):
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like the all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True

    return False


def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set()):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
    )
    parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
    parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
    args = parser.parse_args()
    main(args)
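add_sub_symbol is the interesting piece: it re-marks BERT sub-tokens that fall inside an LTP-segmented whole word. A small trace (the two-character words are invented for illustration):

tokens = ["今", "天", "天", "气"]
words = {"今天", "天气"}
print(add_sub_symbol(tokens, words))  # ['今', '##天', '天', '##气']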
123
import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class a (_lowerCAmelCase ): """simple docstring""" def __init__( self : Tuple , lowerCamelCase : List[str] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[int] ) -> Any: __snake_case : List[Any] = dataset __snake_case : Optional[int] = process __snake_case : str = params def __len__( self : Optional[Any] ) -> Any: return len(self.dataset ) def __getitem__( self : Dict , lowerCamelCase : List[Any] ) -> List[str]: __snake_case : List[Any] = self.dataset[i] __snake_case : Tuple = self.process(lowerCamelCase , **self.params ) return processed class a (_lowerCAmelCase ): """simple docstring""" def __init__( self : Union[str, Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[Any] , lowerCamelCase : Dict=None ) -> int: __snake_case : List[Any] = loader __snake_case : Dict = infer __snake_case : Tuple = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether __snake_case : Union[str, Any] = None __snake_case : Optional[Any] = loader_batch_size # Internal bookkeeping __snake_case : int = None __snake_case : Optional[int] = None def __len__( self : Optional[Any] ) -> Tuple: return len(self.loader ) def __iter__( self : str ) -> Tuple: __snake_case : int = iter(self.loader ) return self def __snake_case ( self : int ) -> Any: if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice __snake_case : Union[str, Any] = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) __snake_case : int = {} for k, element in self._loader_batch_data.items(): if isinstance(lowerCamelCase , lowerCamelCase ): # Convert ModelOutput to tuple first __snake_case : Dict = element.to_tuple() if isinstance(element[0] , torch.Tensor ): __snake_case : Any = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): __snake_case : Optional[Any] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(lowerCamelCase , lowerCamelCase ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): __snake_case : Dict = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): __snake_case : str = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around __snake_case : Union[str, Any] = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers __snake_case : List[Any] = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers __snake_case : Optional[Any] = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. 
__snake_case : Tuple = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 __snake_case : str = self._loader_batch_data.__class__(lowerCamelCase ) self._loader_batch_index += 1 return result def __snake_case ( self : Dict ) -> Union[str, Any]: if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch __snake_case : List[str] = next(self.iterator ) __snake_case : int = self.infer(lowerCamelCase , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(lowerCamelCase , torch.Tensor ): __snake_case : List[Any] = processed else: __snake_case : Optional[Any] = list(processed.keys() )[0] __snake_case : List[Any] = processed[key] if isinstance(lowerCamelCase , lowerCamelCase ): __snake_case : List[str] = len(lowerCamelCase ) else: __snake_case : Tuple = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. __snake_case : Optional[Any] = observed_batch_size # Setting internal index to unwrap the batch __snake_case : Union[str, Any] = processed __snake_case : str = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class a (_lowerCAmelCase ): """simple docstring""" def __init__( self : int , lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any] , lowerCamelCase : Optional[int]=None ) -> Any: super().__init__(lowerCamelCase , lowerCamelCase , lowerCamelCase ) def __iter__( self : Optional[int] ) -> Optional[int]: __snake_case : Union[str, Any] = iter(self.loader ) __snake_case : int = None return self def __snake_case ( self : List[Any] ) -> List[Any]: if self.subiterator is None: __snake_case : Optional[int] = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item __snake_case : int = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. # # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators __snake_case : Union[str, Any] = self.infer(next(self.iterator ) , **self.params ) __snake_case : int = next(self.subiterator ) return processed class a (_lowerCAmelCase ): """simple docstring""" def __iter__( self : Any ) -> Optional[Any]: __snake_case : str = iter(self.loader ) return self def __snake_case ( self : Tuple ) -> str: # Extremely similar to PipelineIterator in its unpacking mechanism # BUT, we have an extra required item which is the presence of `is_last` # That is because everything is flattened by `PipelineChunkIterator` we # need to keep track of how to regroup here in the original `process` # boundaries so that `process` and `postprocess` see the same data. # This iterator accumulates items (possibly while unbatching) until it # its a `is_last` and then just passes it on to the caller. 
__snake_case : Dict = False __snake_case : Dict = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: __snake_case : Union[str, Any] = self.loader_batch_item() __snake_case : Any = item.pop("is_last" ) accumulator.append(lowerCamelCase ) if is_last: return accumulator while not is_last: __snake_case : str = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(lowerCamelCase , torch.Tensor ): __snake_case : Optional[int] = processed else: __snake_case : Union[str, Any] = list(processed.keys() )[0] __snake_case : Optional[Any] = processed[key] if isinstance(lowerCamelCase , lowerCamelCase ): __snake_case : int = len(lowerCamelCase ) else: __snake_case : int = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. __snake_case : Dict = observed_batch_size __snake_case : Union[str, Any] = processed __snake_case : List[str] = 0 while self._loader_batch_index < self.loader_batch_size: __snake_case : str = self.loader_batch_item() __snake_case : str = item.pop("is_last" ) accumulator.append(lowerCamelCase ) if is_last: return accumulator else: __snake_case : List[str] = processed __snake_case : Tuple = item.pop("is_last" ) accumulator.append(lowerCamelCase ) return accumulator class a (_lowerCAmelCase ): """simple docstring""" def __init__( self : Union[str, Any] , lowerCamelCase : Dataset , lowerCamelCase : str ) -> Optional[Any]: __snake_case : int = dataset __snake_case : Union[str, Any] = key def __len__( self : Tuple ) -> Union[str, Any]: return len(self.dataset ) def __getitem__( self : Optional[Any] , lowerCamelCase : str ) -> Optional[int]: return self.dataset[i][self.key] class a (_lowerCAmelCase ): """simple docstring""" def __init__( self : List[Any] , lowerCamelCase : Dataset , lowerCamelCase : str , lowerCamelCase : str ) -> List[str]: __snake_case : Any = dataset __snake_case : Any = keya __snake_case : Union[str, Any] = keya def __len__( self : Optional[int] ) -> Tuple: return len(self.dataset ) def __getitem__( self : Tuple , lowerCamelCase : List[str] ) -> Optional[Any]: return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
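The first wrapper class above is essentially a map-style dataset that defers the processing function to __getitem__; a self-contained stand-in (illustrative, not the transformers class itself) shows the idea:

from torch.utils.data import Dataset


class LazyProcessDataset(Dataset):
    """Apply a processing function on __getitem__ instead of up front."""

    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.process(self.dataset[i], **self.params)


ds = LazyProcessDataset([1, 2, 3], lambda x, scale: x * scale, {"scale": 10})
print(ds[1])  # 20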
123
1
"""simple docstring""" import json import os import tempfile import transformers import datasets from utils import generate_example_dataset, get_duration A_ = 50_00_00 A_ , A_ = os.path.split(__file__) A_ = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json''')) @get_duration def UpperCAmelCase__ (snake_case__ : datasets.Dataset , **snake_case__ : Union[str, Any] ): """simple docstring""" _snake_case : List[Any] = dataset.map(**snake_case__ ) @get_duration def UpperCAmelCase__ (snake_case__ : datasets.Dataset , **snake_case__ : Union[str, Any] ): """simple docstring""" _snake_case : int = dataset.filter(**snake_case__ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : List[str] = {"""num examples""": SPEED_TEST_N_EXAMPLES} with tempfile.TemporaryDirectory() as tmp_dir: _snake_case : Tuple = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} ) _snake_case : Any = generate_example_dataset( os.path.join(snake_case__ , """dataset.arrow""" ) , snake_case__ , num_examples=snake_case__ ) _snake_case : List[Any] = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=snake_case__ ) def tokenize(snake_case__ : Tuple ): return tokenizer(examples["""text"""] ) _snake_case : List[Any] = map(snake_case__ ) _snake_case : List[Any] = map(snake_case__ , batched=snake_case__ ) _snake_case : Any = map(snake_case__ , function=lambda snake_case__ : None , batched=snake_case__ ) with dataset.formatted_as(type="""numpy""" ): _snake_case : Dict = map(snake_case__ , function=lambda snake_case__ : None , batched=snake_case__ ) with dataset.formatted_as(type="""pandas""" ): _snake_case : Any = map(snake_case__ , function=lambda snake_case__ : None , batched=snake_case__ ) with dataset.formatted_as(type="""torch""" , columns="""numbers""" ): _snake_case : Any = map(snake_case__ , function=lambda snake_case__ : None , batched=snake_case__ ) with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ): _snake_case : Optional[Any] = map(snake_case__ , function=lambda snake_case__ : None , batched=snake_case__ ) _snake_case : str = map(snake_case__ , function=snake_case__ , batched=snake_case__ ) _snake_case : List[Any] = filter(snake_case__ ) # Activate later when tokenizer support batched inputs # with dataset.formatted_as(type='numpy'): # times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True) with open(snake_case__ , """wb""" ) as f: f.write(json.dumps(snake_case__ ).encode("""utf-8""" ) ) if __name__ == "__main__": # useful to run the profiler benchmark_map_filter()
350
"""simple docstring""" def UpperCAmelCase__ (snake_case__ : Optional[Any] ): """simple docstring""" _snake_case : Union[str, Any] = [] _snake_case : Dict = set({"""(""", """[""", """{"""} ) _snake_case : Union[str, Any] = set({""")""", """]""", """}"""} ) _snake_case : Tuple = {"""{""": """}""", """[""": """]""", """(""": """)"""} for i in range(len(snake_case__ ) ): if s[i] in open_brackets: stack.append(s[i] ) elif s[i] in closed_brackets and ( len(snake_case__ ) == 0 or (len(snake_case__ ) > 0 and open_to_closed[stack.pop()] != s[i]) ): return False return len(snake_case__ ) == 0 def UpperCAmelCase__ (): """simple docstring""" _snake_case : Any = input("""Enter sequence of brackets: """ ) if is_balanced(snake_case__ ): print(snake_case__ , """is balanced""" ) else: print(snake_case__ , """is not balanced""" ) if __name__ == "__main__": main()
132
0
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __lowerCamelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): '''simple docstring''' a_ : Optional[Any] = StableDiffusionSAGPipeline a_ : Dict = TEXT_TO_IMAGE_PARAMS a_ : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS a_ : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS a_ : Any = TEXT_TO_IMAGE_IMAGE_PARAMS a_ : List[str] = False def lowerCamelCase ( self : int ): torch.manual_seed(0 ) lowerCAmelCase_ : List[str] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) lowerCAmelCase_ : Tuple = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_one=a_ , ) torch.manual_seed(0 ) lowerCAmelCase_ : Optional[int] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) lowerCAmelCase_ : Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) lowerCAmelCase_ : List[Any] = CLIPTextModel(a_ ) lowerCAmelCase_ : int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) lowerCAmelCase_ : str = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def lowerCamelCase ( self : Union[str, Any] , a_ : Optional[int] , a_ : Dict=0 ): if str(a_ ).startswith("mps" ): lowerCAmelCase_ : str = torch.manual_seed(a_ ) else: lowerCAmelCase_ : Optional[Any] = torch.Generator(device=a_ ).manual_seed(a_ ) lowerCAmelCase_ : Tuple = { "prompt": ".", "generator": generator, "num_inference_steps": 2, "guidance_scale": 1.0, "sag_scale": 1.0, "output_type": "numpy", } return inputs def lowerCamelCase ( self : List[Any] ): super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase ( self : Tuple ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self : Tuple ): lowerCAmelCase_ : List[Any] = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" ) lowerCAmelCase_ : Optional[Any] = sag_pipe.to(a_ ) sag_pipe.set_progress_bar_config(disable=a_ ) lowerCAmelCase_ : List[Any] = "." 
lowerCAmelCase_ : Optional[int] = torch.manual_seed(0 ) lowerCAmelCase_ : Optional[int] = sag_pipe( [prompt] , generator=a_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" ) lowerCAmelCase_ : List[Any] = output.images lowerCAmelCase_ : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase_ : List[Any] = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2 def lowerCamelCase ( self : Optional[Any] ): lowerCAmelCase_ : Dict = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" ) lowerCAmelCase_ : Any = sag_pipe.to(a_ ) sag_pipe.set_progress_bar_config(disable=a_ ) lowerCAmelCase_ : List[Any] = "." lowerCAmelCase_ : Tuple = torch.manual_seed(0 ) lowerCAmelCase_ : List[str] = sag_pipe( [prompt] , generator=a_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" ) lowerCAmelCase_ : Optional[int] = output.images lowerCAmelCase_ : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase_ : int = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2 def lowerCamelCase ( self : Tuple ): lowerCAmelCase_ : Any = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" ) lowerCAmelCase_ : Optional[int] = sag_pipe.to(a_ ) sag_pipe.set_progress_bar_config(disable=a_ ) lowerCAmelCase_ : Dict = "." lowerCAmelCase_ : Tuple = torch.manual_seed(0 ) lowerCAmelCase_ : str = sag_pipe( [prompt] , width=7_68 , height=5_12 , generator=a_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" , ) lowerCAmelCase_ : Union[str, Any] = output.images assert image.shape == (1, 5_12, 7_68, 3)
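Outside the test harness, the same pipeline calls reduce to the sketch below (model id and arguments are copied from the slow tests above; requires a GPU):

import torch
from diffusers import StableDiffusionSAGPipeline

pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")
generator = torch.manual_seed(0)

# sag_scale > 0 enables self-attention guidance on top of classifier-free guidance
output = pipe(
    ["a photo of a castle"],  # the tests use "." as a prompt; any prompt works
    generator=generator,
    guidance_scale=7.5,
    sag_scale=1.0,
    num_inference_steps=20,
    output_type="np",
)
image = output.images[0]
print(image.shape)  # (512, 512, 3)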
241
"""simple docstring""" import argparse import glob import logging import os import sys import time from collections import defaultdict from pathlib import Path from typing import Dict, List, Tuple import numpy as np import pytorch_lightning as pl import torch from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback from torch import nn from torch.utils.data import DataLoader from transformers import MBartTokenizer, TaForConditionalGeneration from transformers.models.bart.modeling_bart import shift_tokens_right from utils import ( ROUGE_KEYS, LegacySeqaSeqDataset, SeqaSeqDataset, assert_all_frozen, calculate_bleu, calculate_rouge, check_output_dir, flatten_list, freeze_embeds, freeze_params, get_git_info, label_smoothed_nll_loss, lmap, pickle_save, save_git_info, save_json, use_task_specific_params, ) # need the parent dir module sys.path.insert(2, str(Path(__file__).resolve().parents[1])) from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__) class lowercase ( _UpperCAmelCase ): _SCREAMING_SNAKE_CASE = 'summarization' _SCREAMING_SNAKE_CASE = ['loss'] _SCREAMING_SNAKE_CASE = ROUGE_KEYS _SCREAMING_SNAKE_CASE = 'rouge2' def __init__( self , lowercase , **lowercase ) -> str: if hparams.sortish_sampler and hparams.gpus > 1: lowerCAmelCase = False elif hparams.max_tokens_per_batch is not None: if hparams.gpus > 1: raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""" ) if hparams.sortish_sampler: raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""" ) super().__init__(lowercase , num_labels=lowercase , mode=self.mode , **lowercase ) use_task_specific_params(self.model , """summarization""" ) save_git_info(self.hparams.output_dir ) lowerCAmelCase = Path(self.output_dir ) / """metrics.json""" lowerCAmelCase = Path(self.output_dir ) / """hparams.pkl""" pickle_save(self.hparams , self.hparams_save_path ) lowerCAmelCase = 0 lowerCAmelCase = defaultdict(lowercase ) lowerCAmelCase = self.config.model_type lowerCAmelCase = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size lowerCAmelCase = { "data_dir": self.hparams.data_dir, "max_source_length": self.hparams.max_source_length, "prefix": self.model.config.prefix or "", } lowerCAmelCase = { """train""": self.hparams.n_train, """val""": self.hparams.n_val, """test""": self.hparams.n_test, } lowerCAmelCase = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} lowerCAmelCase = { """train""": self.hparams.max_target_length, """val""": self.hparams.val_max_target_length, """test""": self.hparams.test_max_target_length, } assert self.target_lens["train"] <= self.target_lens["val"], f'target_lens: {self.target_lens}' assert self.target_lens["train"] <= self.target_lens["test"], f'target_lens: {self.target_lens}' if self.hparams.freeze_embeds: freeze_embeds(self.model ) if self.hparams.freeze_encoder: freeze_params(self.model.get_encoder() ) assert_all_frozen(self.model.get_encoder() ) lowerCAmelCase = get_git_info()["""repo_sha"""] lowerCAmelCase = hparams.num_workers lowerCAmelCase = None # default to config if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowercase ): lowerCAmelCase = self.tokenizer.lang_code_to_id[hparams.tgt_lang] lowerCAmelCase = self.decoder_start_token_id lowerCAmelCase = ( SeqaSeqDataset if hasattr(self.tokenizer , """prepare_seq2seq_batch""" ) 
else LegacySeqaSeqDataset ) lowerCAmelCase = False lowerCAmelCase = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams if self.hparams.eval_max_gen_length is not None: lowerCAmelCase = self.hparams.eval_max_gen_length else: lowerCAmelCase = self.model.config.max_length lowerCAmelCase = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric def _snake_case ( self , lowercase ) -> Dict[str, List[str]]: lowerCAmelCase = { k: self.tokenizer.batch_decode(v.tolist() ) if """mask""" not in k else v.shape for k, v in batch.items() } save_json(lowercase , Path(self.output_dir ) / """text_batch.json""" ) save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / """tok_batch.json""" ) lowerCAmelCase = True return readable_batch def _snake_case ( self , lowercase , **lowercase ) -> Union[str, Any]: return self.model(lowercase , **lowercase ) def _snake_case ( self , lowercase ) -> Union[str, Any]: lowerCAmelCase = self.tokenizer.batch_decode( lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase ) return lmap(str.strip , lowercase ) def _snake_case ( self , lowercase ) -> Tuple: lowerCAmelCase = self.tokenizer.pad_token_id lowerCAmelCase , lowerCAmelCase = batch["""input_ids"""], batch["""attention_mask"""] lowerCAmelCase = batch["""labels"""] if isinstance(self.model , lowercase ): lowerCAmelCase = self.model._shift_right(lowercase ) else: lowerCAmelCase = shift_tokens_right(lowercase , lowercase ) if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero lowerCAmelCase = decoder_input_ids self.save_readable_batch(lowercase ) lowerCAmelCase = self(lowercase , attention_mask=lowercase , decoder_input_ids=lowercase , use_cache=lowercase ) lowerCAmelCase = outputs["""logits"""] if self.hparams.label_smoothing == 0: # Same behavior as modeling_bart.py, besides ignoring pad_token_id lowerCAmelCase = nn.CrossEntropyLoss(ignore_index=lowercase ) assert lm_logits.shape[-1] == self.vocab_size lowerCAmelCase = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) ) else: lowerCAmelCase = nn.functional.log_softmax(lowercase , dim=-1 ) lowerCAmelCase , lowerCAmelCase = label_smoothed_nll_loss( lowercase , lowercase , self.hparams.label_smoothing , ignore_index=lowercase ) return (loss,) @property def _snake_case ( self ) -> int: return self.tokenizer.pad_token_id def _snake_case ( self , lowercase , lowercase ) -> Dict: lowerCAmelCase = self._step(lowercase ) lowerCAmelCase = dict(zip(self.loss_names , lowercase ) ) # tokens per batch lowerCAmelCase = batch["""input_ids"""].ne(self.pad ).sum() + batch["""labels"""].ne(self.pad ).sum() lowerCAmelCase = batch["""input_ids"""].shape[0] lowerCAmelCase = batch["""input_ids"""].eq(self.pad ).sum() lowerCAmelCase = batch["""input_ids"""].eq(self.pad ).float().mean() # TODO(SS): make a wandb summary metric for this return {"loss": loss_tensors[0], "log": logs} def _snake_case ( self , lowercase , lowercase ) -> Dict: return self._generative_step(lowercase ) def _snake_case ( self , lowercase , lowercase="val" ) -> Dict: self.step_count += 1 lowerCAmelCase = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names} lowerCAmelCase = losses["""loss"""] lowerCAmelCase = { k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""] } lowerCAmelCase = ( generative_metrics[self.val_metric] if self.val_metric in generative_metrics else 
losses[self.val_metric] ) lowerCAmelCase = torch.tensor(lowercase ).type_as(lowercase ) generative_metrics.update({k: v.item() for k, v in losses.items()} ) losses.update(lowercase ) lowerCAmelCase = {f'{prefix}_avg_{k}': x for k, x in losses.items()} lowerCAmelCase = self.step_count self.metrics[prefix].append(lowercase ) # callback writes this to self.metrics_save_path lowerCAmelCase = flatten_list([x["""preds"""] for x in outputs] ) return { "log": all_metrics, "preds": preds, f'{prefix}_loss': loss, f'{prefix}_{self.val_metric}': metric_tensor, } def _snake_case ( self , lowercase , lowercase ) -> Dict: return calculate_rouge(lowercase , lowercase ) def _snake_case ( self , lowercase ) -> dict: lowerCAmelCase = time.time() # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens') lowerCAmelCase = self.model.generate( batch["""input_ids"""] , attention_mask=batch["""attention_mask"""] , use_cache=lowercase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , ) lowerCAmelCase = (time.time() - ta) / batch["""input_ids"""].shape[0] lowerCAmelCase = self.ids_to_clean_text(lowercase ) lowerCAmelCase = self.ids_to_clean_text(batch["""labels"""] ) lowerCAmelCase = self._step(lowercase ) lowerCAmelCase = dict(zip(self.loss_names , lowercase ) ) lowerCAmelCase = self.calc_generative_metrics(lowercase , lowercase ) lowerCAmelCase = np.mean(lmap(lowercase , lowercase ) ) base_metrics.update(gen_time=lowercase , gen_len=lowercase , preds=lowercase , target=lowercase , **lowercase ) return base_metrics def _snake_case ( self , lowercase , lowercase ) -> Dict: return self._generative_step(lowercase ) def _snake_case ( self , lowercase ) -> int: return self.validation_epoch_end(lowercase , prefix="""test""" ) def _snake_case ( self , lowercase ) -> SeqaSeqDataset: lowerCAmelCase = self.n_obs[type_path] lowerCAmelCase = self.target_lens[type_path] lowerCAmelCase = self.dataset_class( self.tokenizer , type_path=lowercase , n_obs=lowercase , max_target_length=lowercase , **self.dataset_kwargs , ) return dataset def _snake_case ( self , lowercase , lowercase , lowercase = False ) -> DataLoader: lowerCAmelCase = self.get_dataset(lowercase ) if self.hparams.sortish_sampler and type_path != "test" and type_path != "val": lowerCAmelCase = dataset.make_sortish_sampler(lowercase , distributed=self.hparams.gpus > 1 ) return DataLoader( lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , ) elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val": lowerCAmelCase = dataset.make_dynamic_sampler( self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 ) return DataLoader( lowercase , batch_sampler=lowercase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , ) else: return DataLoader( lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , ) def _snake_case ( self ) -> DataLoader: lowerCAmelCase = self.get_dataloader("""train""" , batch_size=self.hparams.train_batch_size , shuffle=lowercase ) return dataloader def _snake_case ( self ) -> DataLoader: return self.get_dataloader("""val""" , batch_size=self.hparams.eval_batch_size ) def _snake_case ( self ) -> DataLoader: return self.get_dataloader("""test""" , batch_size=self.hparams.eval_batch_size ) @staticmethod def 
_snake_case ( lowercase , lowercase ) -> Optional[int]: BaseTransformer.add_model_specific_args(lowercase , lowercase ) add_generic_args(lowercase , lowercase ) parser.add_argument( """--max_source_length""" , default=1_024 , type=lowercase , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--max_target_length""" , default=56 , type=lowercase , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--val_max_target_length""" , default=142 , type=lowercase , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--test_max_target_length""" , default=142 , type=lowercase , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument("""--freeze_encoder""" , action="""store_true""" ) parser.add_argument("""--freeze_embeds""" , action="""store_true""" ) parser.add_argument("""--sortish_sampler""" , action="""store_true""" , default=lowercase ) parser.add_argument("""--overwrite_output_dir""" , action="""store_true""" , default=lowercase ) parser.add_argument("""--max_tokens_per_batch""" , type=lowercase , default=lowercase ) parser.add_argument("""--logger_name""" , type=lowercase , choices=["""default""", """wandb""", """wandb_shared"""] , default="""default""" ) parser.add_argument("""--n_train""" , type=lowercase , default=-1 , required=lowercase , help="""# examples. -1 means use all.""" ) parser.add_argument("""--n_val""" , type=lowercase , default=500 , required=lowercase , help="""# examples. -1 means use all.""" ) parser.add_argument("""--n_test""" , type=lowercase , default=-1 , required=lowercase , help="""# examples. -1 means use all.""" ) parser.add_argument( """--task""" , type=lowercase , default="""summarization""" , required=lowercase , help="""# examples. -1 means use all.""" ) parser.add_argument("""--label_smoothing""" , type=lowercase , default=0.0 , required=lowercase ) parser.add_argument("""--src_lang""" , type=lowercase , default="""""" , required=lowercase ) parser.add_argument("""--tgt_lang""" , type=lowercase , default="""""" , required=lowercase ) parser.add_argument("""--eval_beams""" , type=lowercase , default=lowercase , required=lowercase ) parser.add_argument( """--val_metric""" , type=lowercase , default=lowercase , required=lowercase , choices=["""bleu""", """rouge2""", """loss""", None] ) parser.add_argument("""--eval_max_gen_length""" , type=lowercase , default=lowercase , help="""never generate more than n tokens""" ) parser.add_argument("""--save_top_k""" , type=lowercase , default=1 , required=lowercase , help="""How many checkpoints to save""" ) parser.add_argument( """--early_stopping_patience""" , type=lowercase , default=-1 , required=lowercase , help=( """-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. 
So""" """ val_check_interval will effect it.""" ) , ) return parser class lowercase ( _UpperCAmelCase ): _SCREAMING_SNAKE_CASE = 'translation' _SCREAMING_SNAKE_CASE = ['loss'] _SCREAMING_SNAKE_CASE = ['bleu'] _SCREAMING_SNAKE_CASE = 'bleu' def __init__( self , lowercase , **lowercase ) -> Union[str, Any]: super().__init__(lowercase , **lowercase ) lowerCAmelCase = hparams.src_lang lowerCAmelCase = hparams.tgt_lang def _snake_case ( self , lowercase , lowercase ) -> dict: return calculate_bleu(lowercase , lowercase ) def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : int=None ): '''simple docstring''' Path(args.output_dir ).mkdir(exist_ok=SCREAMING_SNAKE_CASE ) check_output_dir(SCREAMING_SNAKE_CASE , expected_items=3 ) if model is None: if "summarization" in args.task: lowerCAmelCase = SummarizationModule(SCREAMING_SNAKE_CASE ) else: lowerCAmelCase = TranslationModule(SCREAMING_SNAKE_CASE ) lowerCAmelCase = Path(args.data_dir ).name if ( args.logger_name == "default" or args.fast_dev_run or str(args.output_dir ).startswith("""/tmp""" ) or str(args.output_dir ).startswith("""/var""" ) ): lowerCAmelCase = True # don't pollute wandb logs unnecessarily elif args.logger_name == "wandb": from pytorch_lightning.loggers import WandbLogger lowerCAmelCase = os.environ.get("""WANDB_PROJECT""" , SCREAMING_SNAKE_CASE ) lowerCAmelCase = WandbLogger(name=model.output_dir.name , project=SCREAMING_SNAKE_CASE ) elif args.logger_name == "wandb_shared": from pytorch_lightning.loggers import WandbLogger lowerCAmelCase = WandbLogger(name=model.output_dir.name , project=F'hf_{dataset}' ) if args.early_stopping_patience >= 0: lowerCAmelCase = get_early_stopping_callback(model.val_metric , args.early_stopping_patience ) else: lowerCAmelCase = False lowerCAmelCase = args.val_metric == """loss""" lowerCAmelCase = generic_train( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback( args.output_dir , model.val_metric , args.save_top_k , SCREAMING_SNAKE_CASE ) , early_stopping_callback=SCREAMING_SNAKE_CASE , logger=SCREAMING_SNAKE_CASE , ) pickle_save(model.hparams , model.output_dir / """hparams.pkl""" ) if not args.do_predict: return model lowerCAmelCase = """""" lowerCAmelCase = sorted(glob.glob(os.path.join(args.output_dir , """*.ckpt""" ) , recursive=SCREAMING_SNAKE_CASE ) ) if checkpoints: lowerCAmelCase = checkpoints[-1] lowerCAmelCase = checkpoints[-1] trainer.logger.log_hyperparams(model.hparams ) # test() without a model tests using the best checkpoint automatically trainer.test() return model if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() SCREAMING_SNAKE_CASE__ = pl.Trainer.add_argparse_args(parser) SCREAMING_SNAKE_CASE__ = SummarizationModule.add_model_specific_args(parser, os.getcwd()) SCREAMING_SNAKE_CASE__ = parser.parse_args() main(args)
46
0
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class lowercase__ ( __lowerCamelCase , unittest.TestCase ): A__ : Any =TextToVideoSDPipeline A__ : Union[str, Any] =TEXT_TO_IMAGE_PARAMS A__ : Any =TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. A__ : List[str] =frozenset( [ """num_inference_steps""", """generator""", """latents""", """return_dict""", """callback""", """callback_steps""", ] ) def A_ ( self : Union[str, Any] ): torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , ) SCREAMING_SNAKE_CASE__ = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=__lowercase , set_alpha_to_one=__lowercase , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , ) SCREAMING_SNAKE_CASE__ = CLIPTextModel(__lowercase ) SCREAMING_SNAKE_CASE__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) SCREAMING_SNAKE_CASE__ = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, } return components def A_ ( self : Tuple , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int]=0 ): if str(__lowercase ).startswith('mps' ): SCREAMING_SNAKE_CASE__ = torch.manual_seed(__lowercase ) else: SCREAMING_SNAKE_CASE__ = torch.Generator(device=__lowercase ).manual_seed(__lowercase ) SCREAMING_SNAKE_CASE__ = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''pt''', } return inputs def A_ ( self : int ): SCREAMING_SNAKE_CASE__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE__ = self.get_dummy_components() SCREAMING_SNAKE_CASE__ = TextToVideoSDPipeline(**__lowercase ) SCREAMING_SNAKE_CASE__ = sd_pipe.to(__lowercase ) sd_pipe.set_progress_bar_config(disable=__lowercase ) SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__lowercase ) SCREAMING_SNAKE_CASE__ = '''np''' SCREAMING_SNAKE_CASE__ = sd_pipe(**__lowercase ).frames SCREAMING_SNAKE_CASE__ = frames[0][-3:, -3:, -1] assert frames[0].shape == (64, 64, 3) 
SCREAMING_SNAKE_CASE__ = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def A_ ( self : int ): self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__lowercase , expected_max_diff=3e-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def A_ ( self : Optional[Any] ): self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__lowercase , expected_max_diff=1e-2 ) @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def A_ ( self : int ): pass @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def A_ ( self : Tuple ): pass @unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' ) def A_ ( self : List[Any] ): pass def A_ ( self : Tuple ): return super().test_progress_bar() @slow @skip_mps class lowercase__ ( unittest.TestCase ): def A_ ( self : Tuple ): SCREAMING_SNAKE_CASE__ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy' ) SCREAMING_SNAKE_CASE__ = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' ) SCREAMING_SNAKE_CASE__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) SCREAMING_SNAKE_CASE__ = pipe.to('cuda' ) SCREAMING_SNAKE_CASE__ = '''Spiderman is surfing''' SCREAMING_SNAKE_CASE__ = torch.Generator(device='cpu' ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ = pipe(__lowercase , generator=__lowercase , num_inference_steps=25 , output_type='pt' ).frames SCREAMING_SNAKE_CASE__ = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2 def A_ ( self : Dict ): SCREAMING_SNAKE_CASE__ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy' ) SCREAMING_SNAKE_CASE__ = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' ) SCREAMING_SNAKE_CASE__ = pipe.to('cuda' ) SCREAMING_SNAKE_CASE__ = '''Spiderman is surfing''' SCREAMING_SNAKE_CASE__ = torch.Generator(device='cpu' ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ = pipe(__lowercase , generator=__lowercase , num_inference_steps=2 , output_type='pt' ).frames SCREAMING_SNAKE_CASE__ = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2
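# The slow tests above double as a usage recipe; a pared-down inference sketch follows.
# The model id, scheduler swap, and step count are copied from the test; running it
# requires a CUDA device and downloads the full checkpoint.
import torch
from diffusers import DPMSolverMultistepScheduler, TextToVideoSDPipeline

pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")
generator = torch.Generator(device="cpu").manual_seed(0)
video_frames = pipe(
    "Spiderman is surfing", generator=generator, num_inference_steps=25, output_type="pt"
).frames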
350
import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class lowercase__ ( _UpperCAmelCase ): A__ : Any =(CMStochasticIterativeScheduler,) A__ : Optional[int] =1_0 def A_ ( self : Dict , **UpperCAmelCase_ : Union[str, Any] ): SCREAMING_SNAKE_CASE__ = { 'num_train_timesteps': 201, 'sigma_min': 0.002, 'sigma_max': 80.0, } config.update(**UpperCAmelCase_ ) return config def A_ ( self : Tuple ): SCREAMING_SNAKE_CASE__ = 10 SCREAMING_SNAKE_CASE__ = self.get_scheduler_config() SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0](**UpperCAmelCase_ ) scheduler.set_timesteps(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = scheduler.timesteps[0] SCREAMING_SNAKE_CASE__ = scheduler.timesteps[1] SCREAMING_SNAKE_CASE__ = self.dummy_sample SCREAMING_SNAKE_CASE__ = 0.1 * sample SCREAMING_SNAKE_CASE__ = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ).prev_sample SCREAMING_SNAKE_CASE__ = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def A_ ( self : List[str] ): for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=UpperCAmelCase_ ) def A_ ( self : Any ): for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=UpperCAmelCase_ ) def A_ ( self : List[Any] ): SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0] SCREAMING_SNAKE_CASE__ = self.get_scheduler_config() SCREAMING_SNAKE_CASE__ = scheduler_class(**UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = 1 scheduler.set_timesteps(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = scheduler.timesteps SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ = self.dummy_model() SCREAMING_SNAKE_CASE__ = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(UpperCAmelCase_ ): # 1. scale model input SCREAMING_SNAKE_CASE__ = scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_ ) # 2. predict noise residual SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase_ , UpperCAmelCase_ ) # 3. predict previous sample x_t-1 SCREAMING_SNAKE_CASE__ = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_ ).prev_sample SCREAMING_SNAKE_CASE__ = pred_prev_sample SCREAMING_SNAKE_CASE__ = torch.sum(torch.abs(UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE__ = torch.mean(torch.abs(UpperCAmelCase_ ) ) assert abs(result_sum.item() - 192.7_614 ) < 1e-2 assert abs(result_mean.item() - 0.2_510 ) < 1e-3 def A_ ( self : Optional[Any] ): SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0] SCREAMING_SNAKE_CASE__ = self.get_scheduler_config() SCREAMING_SNAKE_CASE__ = scheduler_class(**UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = [106, 0] scheduler.set_timesteps(timesteps=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = scheduler.timesteps SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ = self.dummy_model() SCREAMING_SNAKE_CASE__ = self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. scale model input SCREAMING_SNAKE_CASE__ = scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_ ) # 2. predict noise residual SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase_ , UpperCAmelCase_ ) # 3. 
predict previous sample x_t-1 SCREAMING_SNAKE_CASE__ = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_ ).prev_sample SCREAMING_SNAKE_CASE__ = pred_prev_sample SCREAMING_SNAKE_CASE__ = torch.sum(torch.abs(UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE__ = torch.mean(torch.abs(UpperCAmelCase_ ) ) assert abs(result_sum.item() - 347.6_357 ) < 1e-2 assert abs(result_mean.item() - 0.4_527 ) < 1e-3 def A_ ( self : Tuple ): SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0] SCREAMING_SNAKE_CASE__ = self.get_scheduler_config() SCREAMING_SNAKE_CASE__ = scheduler_class(**UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = [39, 30, 12, 15, 0] with self.assertRaises(UpperCAmelCase_ , msg='`timesteps` must be in descending order.' ): scheduler.set_timesteps(timesteps=UpperCAmelCase_ ) def A_ ( self : List[Any] ): SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0] SCREAMING_SNAKE_CASE__ = self.get_scheduler_config() SCREAMING_SNAKE_CASE__ = scheduler_class(**UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = [39, 30, 12, 1, 0] SCREAMING_SNAKE_CASE__ = len(UpperCAmelCase_ ) with self.assertRaises(UpperCAmelCase_ , msg='Can only pass one of `num_inference_steps` or `timesteps`.' ): scheduler.set_timesteps(num_inference_steps=UpperCAmelCase_ , timesteps=UpperCAmelCase_ ) def A_ ( self : Optional[Any] ): SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0] SCREAMING_SNAKE_CASE__ = self.get_scheduler_config() SCREAMING_SNAKE_CASE__ = scheduler_class(**UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = [scheduler.config.num_train_timesteps] with self.assertRaises( UpperCAmelCase_ , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ): scheduler.set_timesteps(timesteps=UpperCAmelCase_ )
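# The assertions above pin down the two mutually exclusive ways of configuring this
# scheduler: a step count via num_inference_steps, or an explicit list of timesteps.
# A minimal sketch of the second form, reusing the values from get_scheduler_config
# and the multi-timestep test:
from diffusers import CMStochasticIterativeScheduler

scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
scheduler.set_timesteps(timesteps=[106, 0])  # must be strictly descending, as asserted above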
169
0
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class __A ( unittest.TestCase ): def __A ( self ): _lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) _lowerCAmelCase : int = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(a__ ) _lowerCAmelCase : Dict = -1 _lowerCAmelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(a__ ) _lowerCAmelCase : List[Any] = model.generate(a__ , max_new_tokens=10 , do_sample=a__ ) _lowerCAmelCase : Any = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: _lowerCAmelCase : Tuple = TextStreamer(a__ ) model.generate(a__ , max_new_tokens=10 , do_sample=a__ , streamer=a__ ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _lowerCAmelCase : Any = cs.out[:-1] self.assertEqual(a__ , a__ ) def __A ( self ): _lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) _lowerCAmelCase : Tuple = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(a__ ) _lowerCAmelCase : Optional[int] = -1 _lowerCAmelCase : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(a__ ) _lowerCAmelCase : Any = model.generate(a__ , max_new_tokens=10 , do_sample=a__ ) _lowerCAmelCase : List[Any] = tokenizer.decode(greedy_ids[0] ) _lowerCAmelCase : int = TextIteratorStreamer(a__ ) _lowerCAmelCase : Optional[int] = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer} _lowerCAmelCase : Optional[int] = Thread(target=model.generate , kwargs=a__ ) thread.start() _lowerCAmelCase : int = """""" for new_text in streamer: streamer_text += new_text self.assertEqual(a__ , a__ ) def __A ( self ): _lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) _lowerCAmelCase : Tuple = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(a__ ) _lowerCAmelCase : str = -1 _lowerCAmelCase : Optional[int] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(a__ ) _lowerCAmelCase : Any = model.generate(a__ , max_new_tokens=10 , do_sample=a__ ) _lowerCAmelCase : Optional[Any] = greedy_ids[:, input_ids.shape[1] :] _lowerCAmelCase : List[Any] = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: _lowerCAmelCase : List[Any] = TextStreamer(a__ , skip_prompt=a__ ) model.generate(a__ , max_new_tokens=10 , do_sample=a__ , streamer=a__ ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _lowerCAmelCase : Union[str, Any] = cs.out[:-1] self.assertEqual(a__ , a__ ) def __A ( self ): _lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained("""distilgpt2""" ) _lowerCAmelCase : Optional[int] = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(a__ ) _lowerCAmelCase : Optional[Any] = -1 _lowerCAmelCase : List[Any] = torch.ones((1, 5) , device=a__ ).long() * model.config.bos_token_id with CaptureStdout() as cs: _lowerCAmelCase : Tuple = TextStreamer(a__ , skip_special_tokens=a__ ) model.generate(a__ , 
max_new_tokens=1 , do_sample=a__ , streamer=a__ ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _lowerCAmelCase : Optional[int] = cs.out[:-1] # Remove the final "\n" _lowerCAmelCase : Union[str, Any] = tokenizer(a__ , return_tensors="""pt""" ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def __A ( self ): _lowerCAmelCase : int = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) _lowerCAmelCase : Optional[int] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(a__ ) _lowerCAmelCase : Dict = -1 _lowerCAmelCase : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(a__ ) _lowerCAmelCase : Any = TextIteratorStreamer(a__ , timeout=0.0_0_1 ) _lowerCAmelCase : Union[str, Any] = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer} _lowerCAmelCase : List[str] = Thread(target=model.generate , kwargs=a__ ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(a__ ): _lowerCAmelCase : List[Any] = """""" for new_text in streamer: streamer_text += new_text
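# The non-blocking streaming pattern these TextIteratorStreamer tests rely on: run
# generate() in a worker thread and consume decoded text from the streamer iterator.
# The tiny test checkpoint is reused here so the sketch stays cheap to run.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tokenizer("An example prompt", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer)
Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer}).start()
generated_text = "".join(new_text for new_text in streamer)  # tokens arrive as they are produced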
44
import argparse import gc import json import os import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler UpperCAmelCase : Optional[Any] = 16 UpperCAmelCase : Optional[Any] = 32 def __lowerCamelCase ( lowerCamelCase__ : List[str] ): '''simple docstring''' return int(x / 2**20 ) class __lowercase : """simple docstring""" def __enter__( self ) -> Optional[Any]: '''simple docstring''' gc.collect() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero lowerCamelCase = torch.cuda.memory_allocated() return self def __exit__( self , *A ) -> int: '''simple docstring''' gc.collect() torch.cuda.empty_cache() lowerCamelCase = torch.cuda.memory_allocated() lowerCamelCase = torch.cuda.max_memory_allocated() lowerCamelCase = bamb(self.end - self.begin ) lowerCamelCase = bamb(self.peak - self.begin ) # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") def __lowerCamelCase ( lowerCamelCase__ : Accelerator , lowerCamelCase__ : int = 16 , lowerCamelCase__ : str = "bert-base-cased" , lowerCamelCase__ : int = 320 , lowerCamelCase__ : int = 160 , ): '''simple docstring''' lowerCamelCase = AutoTokenizer.from_pretrained(lowerCamelCase__ ) lowerCamelCase = load_dataset( """glue""" , """mrpc""" , split={"""train""": f'train[:{n_train}]', """validation""": f'validation[:{n_val}]'} ) def tokenize_function(lowerCamelCase__ : str ): # max_length=None => use the model max length (it's actually the default) lowerCamelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowerCamelCase = datasets.map( lowerCamelCase__ , batched=lowerCamelCase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowerCamelCase__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCamelCase = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(lowerCamelCase__ : Union[str, Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(lowerCamelCase__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" ) return tokenizer.pad(lowerCamelCase__ , padding="""longest""" , return_tensors="""pt""" ) # Instantiate dataloaders. 
lowerCamelCase = DataLoader( tokenized_datasets["""train"""] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ ) lowerCamelCase = DataLoader( tokenized_datasets["""validation"""] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ ) return train_dataloader, eval_dataloader def __lowerCamelCase ( lowerCamelCase__ : Tuple , lowerCamelCase__ : Tuple ): '''simple docstring''' lowerCamelCase = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCamelCase = config["""lr"""] lowerCamelCase = int(config["""num_epochs"""] ) lowerCamelCase = int(config["""seed"""] ) lowerCamelCase = int(config["""batch_size"""] ) lowerCamelCase = args.model_name_or_path set_seed(lowerCamelCase__ ) lowerCamelCase , lowerCamelCase = get_dataloaders(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , args.n_train , args.n_val ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCamelCase = AutoModelForSequenceClassification.from_pretrained(lowerCamelCase__ , return_dict=lowerCamelCase__ ) # Instantiate optimizer lowerCamelCase = ( AdamW if accelerator.state.deepspeed_plugin is None or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) lowerCamelCase = optimizer_cls(params=model.parameters() , lr=lowerCamelCase__ ) if accelerator.state.deepspeed_plugin is not None: lowerCamelCase = accelerator.state.deepspeed_plugin.deepspeed_config[ """gradient_accumulation_steps""" ] else: lowerCamelCase = 1 lowerCamelCase = (len(lowerCamelCase__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lowerCamelCase = get_linear_schedule_with_warmup( optimizer=lowerCamelCase__ , num_warmup_steps=0 , num_training_steps=lowerCamelCase__ , ) else: lowerCamelCase = DummyScheduler(lowerCamelCase__ , total_num_steps=lowerCamelCase__ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = accelerator.prepare( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # We need to keep track of how many total steps we have iterated over lowerCamelCase = 0 # We also need to keep track of the starting epoch so files are named properly lowerCamelCase = 0 # Now we train the model lowerCamelCase = {} for epoch in range(lowerCamelCase__ , lowerCamelCase__ ): with TorchTracemalloc() as tracemalloc: model.train() for step, batch in enumerate(lowerCamelCase__ ): lowerCamelCase = model(**lowerCamelCase__ ) lowerCamelCase = outputs.loss lowerCamelCase = loss / gradient_accumulation_steps accelerator.backward(lowerCamelCase__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print("""Memory before entering the train : {}""".format(bamb(tracemalloc.begin ) ) ) accelerator.print("""Memory consumed at the end of the train (end-begin): {}""".format(tracemalloc.used ) ) accelerator.print("""Peak Memory consumed during the train (max-begin): {}""".format(tracemalloc.peaked ) ) accelerator.print( """Total Peak Memory consumed during the train (max): {}""".format( tracemalloc.peaked + bamb(tracemalloc.begin ) ) ) lowerCamelCase = tracemalloc.peaked + bamb(tracemalloc.begin ) if args.peak_memory_upper_bound is not None: assert ( train_total_peak_memory[f'epoch-{epoch}'] <= args.peak_memory_upper_bound ), "Peak memory usage exceeded the upper bound" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , """peak_memory_utilization.json""" ) , """w""" ) as f: json.dump(lowerCamelCase__ , lowerCamelCase__ )
252
0
import pytest from datasets import inspect_metric, list_metrics, load_metric @pytest.fixture def __snake_case ( _UpperCAmelCase ): monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''' , set() ) @pytest.fixture def __snake_case ( _UpperCAmelCase ): class _A : def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : Dict): '''simple docstring''' __a = metric_id class _A : UpperCamelCase__ : Optional[int] = [MetricMock(__UpperCAmelCase ) for metric_id in ['''accuracy''', '''mse''', '''precision''', '''codeparrot/apps_metric''']] def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' return self._metrics monkeypatch.setattr('''datasets.inspect.huggingface_hub''' , HfhMock() ) @pytest.mark.parametrize( '''func, args''' , [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))] ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): if "tmp_path" in args: __a = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args ) with pytest.warns(a__ , match='''https://huggingface.co/docs/evaluate''' ): func(*a__ )
361
def __snake_case ( _UpperCAmelCase = 1000000 ): __a = limit + 1 __a = [0] * limit for first_term in range(1 , _UpperCAmelCase ): for n in range(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = first_term + n / first_term if common_difference % 4: # d must be divisible by 4 continue else: common_difference /= 4 if ( first_term > common_difference and first_term < 4 * common_difference ): # since x,y,z are positive integers frequency[n] += 1 # so z > 0 needs a > d, and n > 0 needs a < 4d __a = sum(1 for x in frequency[1:limit] if x == 10 ) return count if __name__ == "__main__": print(f'{solution() = }')
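# The identity behind the loops above: writing the arithmetic progression as
# x = y + d, z = y - d gives n = x**2 - y**2 - z**2 = 4*d*y - y**2 = y*(4*d - y), so
# 4*d = y + n/y. That is why (first_term + n / first_term) must be divisible by 4,
# with first_term playing the role of y and common_difference the role of d, and the
# checks first_term > common_difference (z > 0) and first_term < 4 * common_difference
# (n > 0) follow directly. A quick sanity check with a known n = 27 solution
# (x, y, z) = (34, 27, 20):
y, d = 27, 7
assert (y + d) ** 2 - y**2 - (y - d) ** 2 == y * (4 * d - y) == 27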
131
0
"""simple docstring""" from __future__ import annotations def A_ ( _lowerCAmelCase : float, _lowerCAmelCase : float, _lowerCAmelCase : float, ): """simple docstring""" if (stress, tangential_force, area).count(0 ) != 1: raise ValueError('''You cannot supply more or less than 2 values''' ) elif stress < 0: raise ValueError('''Stress cannot be negative''' ) elif tangential_force < 0: raise ValueError('''Tangential Force cannot be negative''' ) elif area < 0: raise ValueError('''Area cannot be negative''' ) elif stress == 0: return ( "stress", tangential_force / area, ) elif tangential_force == 0: return ( "tangential_force", stress * area, ) else: return ( "area", tangential_force / stress, ) if __name__ == "__main__": import doctest doctest.testmod()
320
"""simple docstring""" from ..utils import DummyObject, requires_backends class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Optional[Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : str = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Any = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Union[str, Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Dict = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Optional[Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> int: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Union[str, Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Union[str, Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> 
Optional[Any]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Union[str, Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Union[str, Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Tuple = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Optional[Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Any = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: requires_backends(cls , ['''flax'''] )
320
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowercase : Dict = logging.get_logger(__name__) _lowercase : Any = { "facebook/s2t-wav2vec2-large-en-de": ( "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2 } class __SCREAMING_SNAKE_CASE ( a__ ): '''simple docstring''' _a = 'speech_to_text_2' _a = ['past_key_values'] _a = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self : List[Any], lowerCamelCase : Dict=1_0000, lowerCamelCase : Optional[Any]=6, lowerCamelCase : Any=2048, lowerCamelCase : Optional[Any]=4, lowerCamelCase : List[str]=0.0, lowerCamelCase : str=True, lowerCamelCase : str="relu", lowerCamelCase : Any=256, lowerCamelCase : Optional[Any]=0.1, lowerCamelCase : Optional[Any]=0.0, lowerCamelCase : List[Any]=0.0, lowerCamelCase : Tuple=0.02, lowerCamelCase : Dict=2, lowerCamelCase : Optional[int]=True, lowerCamelCase : str=1, lowerCamelCase : int=0, lowerCamelCase : Optional[int]=2, lowerCamelCase : Union[str, Any]=1024, **lowerCamelCase : int, )-> Union[str, Any]: lowerCamelCase__ : int =vocab_size lowerCamelCase__ : List[Any] =d_model lowerCamelCase__ : Union[str, Any] =decoder_ffn_dim lowerCamelCase__ : Tuple =decoder_layers lowerCamelCase__ : Dict =decoder_attention_heads lowerCamelCase__ : Any =dropout lowerCamelCase__ : List[Any] =attention_dropout lowerCamelCase__ : Any =activation_dropout lowerCamelCase__ : Dict =activation_function lowerCamelCase__ : Optional[Any] =init_std lowerCamelCase__ : Optional[Any] =decoder_layerdrop lowerCamelCase__ : int =use_cache lowerCamelCase__ : List[Any] =decoder_layers lowerCamelCase__ : str =scale_embedding # scale factor will be sqrt(d_model) if True lowerCamelCase__ : Any =max_target_positions super().__init__( pad_token_id=lowerCamelCase, bos_token_id=lowerCamelCase, eos_token_id=lowerCamelCase, decoder_start_token_id=lowerCamelCase, **lowerCamelCase, )
370
"""simple docstring""" from collections import defaultdict class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Union[str, Any], lowerCamelCase : List[Any], lowerCamelCase : List[str] )-> Optional[int]: lowerCamelCase__ : List[Any] =total # total no of tasks (N) # DP table will have a dimension of (2^M)*N # initially all values are set to -1 lowerCamelCase__ : Optional[Any] =[ [-1 for i in range(total + 1 )] for j in range(2 ** len(lowerCamelCase ) ) ] lowerCamelCase__ : Any =defaultdict(lowerCamelCase ) # stores the list of persons for each task # final_mask is used to check if all persons are included by setting all bits # to 1 lowerCamelCase__ : List[Any] =(1 << len(lowerCamelCase )) - 1 def snake_case ( self : int, lowerCamelCase : str, lowerCamelCase : Any )-> Any: # if mask == self.finalmask all persons are distributed tasks, return 1 if mask == self.final_mask: return 1 # if not everyone gets the task and no more tasks are available, return 0 if task_no > self.total_tasks: return 0 # if case already considered if self.dp[mask][task_no] != -1: return self.dp[mask][task_no] # Number of ways when we don't this task in the arrangement lowerCamelCase__ : Optional[int] =self.count_ways_until(lowerCamelCase, task_no + 1 ) # now assign the tasks one by one to all possible persons and recursively # assign for the remaining tasks. if task_no in self.task: for p in self.task[task_no]: # if p is already given a task if mask & (1 << p): continue # assign this task to p and change the mask value. And recursively # assign tasks with the new mask value. total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1 ) # save the value. lowerCamelCase__ : int =total_ways_util return self.dp[mask][task_no] def snake_case ( self : Dict, lowerCamelCase : Dict )-> int: # Store the list of persons for each task for i in range(len(lowerCamelCase ) ): for j in task_performed[i]: self.task[j].append(lowerCamelCase ) # call the function to fill the DP table, final answer is stored in dp[0][1] return self.count_ways_until(0, 1 ) if __name__ == "__main__": _lowercase : Tuple = 5 # total no of tasks (the value of N) # the list of tasks that can be done by M persons. _lowercase : Dict = [[1, 3, 4], [1, 2, 5], [3, 4]] print( AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways( task_performed ) )
272
0
from functools import lru_cache def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" __a = 2 __a = set() while i * i <= n: if n % i: i += 1 else: n //= i factors.add(_SCREAMING_SNAKE_CASE ) if n > 1: factors.add(_SCREAMING_SNAKE_CASE ) return factors @lru_cache def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" return len(unique_prime_factors(_SCREAMING_SNAKE_CASE ) ) def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : list ): """simple docstring""" return len(set(_SCREAMING_SNAKE_CASE ) ) in (0, 1) def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" __a = 2 while True: # Increment each value of a generated range __a = [base + i for i in range(_SCREAMING_SNAKE_CASE )] # Run elements through our unique_prime_factors function # Append our target number to the end. __a = [upf_len(_SCREAMING_SNAKE_CASE ) for x in group] checker.append(_SCREAMING_SNAKE_CASE ) # If all numbers in the list are equal, return the group variable. if equality(_SCREAMING_SNAKE_CASE ): return group # Increment our base variable by 1 base += 1 def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int = 4 ): """simple docstring""" __a = run(_SCREAMING_SNAKE_CASE ) return results[0] if len(_SCREAMING_SNAKE_CASE ) else None if __name__ == "__main__": print(solution())
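# Sanity checks for the solver above: solution(n) returns the first of n consecutive
# integers that each have exactly n distinct prime factors (Project Euler 47).
assert solution(2) == 14   # 14 = 2 * 7 and 15 = 3 * 5
assert solution(3) == 644  # 644 = 2^2 * 7 * 23, 645 = 3 * 5 * 43, 646 = 2 * 17 * 19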
302
import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any ): """simple docstring""" __a = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: __a = 128 elif "12-12" in model_name: __a = 12 __a = 12 elif "14-14" in model_name: __a = 14 __a = 14 elif "16-16" in model_name: __a = 16 __a = 16 else: raise ValueError("""Model not supported""" ) __a = """huggingface/label-files""" if "speech-commands" in model_name: __a = 35 __a = """speech-commands-v2-id2label.json""" else: __a = 527 __a = """audioset-id2label.json""" __a = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) ) __a = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} __a = idalabel __a = {v: k for k, v in idalabel.items()} return config def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" if "module.v" in name: __a = name.replace("""module.v""" , """audio_spectrogram_transformer""" ) if "cls_token" in name: __a = name.replace("""cls_token""" , """embeddings.cls_token""" ) if "dist_token" in name: __a = name.replace("""dist_token""" , """embeddings.distillation_token""" ) if "pos_embed" in name: __a = name.replace("""pos_embed""" , """embeddings.position_embeddings""" ) if "patch_embed.proj" in name: __a = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) # transformer blocks if "blocks" in name: __a = name.replace("""blocks""" , """encoder.layer""" ) if "attn.proj" in name: __a = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: __a = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: __a = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: __a = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: __a = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: __a = name.replace("""mlp.fc2""" , """output.dense""" ) # final layernorm if "audio_spectrogram_transformer.norm" in name: __a = name.replace("""audio_spectrogram_transformer.norm""" , """audio_spectrogram_transformer.layernorm""" ) # classifier head if "module.mlp_head.0" in name: __a = name.replace("""module.mlp_head.0""" , """classifier.layernorm""" ) if "module.mlp_head.1" in name: __a = name.replace("""module.mlp_head.1""" , """classifier.dense""" ) return name def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] ): """simple docstring""" for key in orig_state_dict.copy().keys(): __a = orig_state_dict.pop(_SCREAMING_SNAKE_CASE ) if "qkv" in key: __a = key.split(""".""" ) __a = int(key_split[3] ) __a = config.hidden_size if "weight" in key: __a = val[:dim, :] __a = val[dim : dim * 2, :] __a = val[-dim:, :] else: __a = val[:dim] __a = val[dim : dim * 2] __a = val[-dim:] else: __a = val return orig_state_dict def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] ): """simple docstring""" __a = [ """module.v.head.weight""", """module.v.head.bias""", """module.v.head_dist.weight""", """module.v.head_dist.bias""", ] for k in ignore_keys: state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) 
@torch.no_grad() def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[str]=False ): """simple docstring""" __a = get_audio_spectrogram_transformer_config(_SCREAMING_SNAKE_CASE ) __a = { """ast-finetuned-audioset-10-10-0.4593""": ( """https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1""" ), """ast-finetuned-audioset-10-10-0.450""": ( """https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1""" ), """ast-finetuned-audioset-10-10-0.448""": ( """https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1""" ), """ast-finetuned-audioset-10-10-0.448-v2""": ( """https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1""" ), """ast-finetuned-audioset-12-12-0.447""": ( """https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1""" ), """ast-finetuned-audioset-14-14-0.443""": ( """https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1""" ), """ast-finetuned-audioset-16-16-0.442""": ( """https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1""" ), """ast-finetuned-speech-commands-v2""": ( """https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1""" ), } # load original state_dict __a = model_name_to_url[model_name] __a = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location="""cpu""" ) # remove some keys remove_keys(_SCREAMING_SNAKE_CASE ) # rename some keys __a = convert_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # load 🤗 model __a = ASTForAudioClassification(_SCREAMING_SNAKE_CASE ) model.eval() model.load_state_dict(_SCREAMING_SNAKE_CASE ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 __a = -4.267_7393 if """speech-commands""" not in model_name else -6.84_5978 __a = 4.568_9974 if """speech-commands""" not in model_name else 5.565_4526 __a = 1024 if """speech-commands""" not in model_name else 128 __a = ASTFeatureExtractor(mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE ) if "speech-commands" in model_name: __a = load_dataset("""speech_commands""" , """v0.02""" , split="""validation""" ) __a = dataset[0]["""audio"""]["""array"""] else: __a = hf_hub_download( repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" , ) __a , __a = torchaudio.load(_SCREAMING_SNAKE_CASE ) __a = waveform.squeeze().numpy() __a = feature_extractor(_SCREAMING_SNAKE_CASE , sampling_rate=1_6000 , return_tensors="""pt""" ) # forward pass __a = model(**_SCREAMING_SNAKE_CASE ) __a = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": __a = torch.tensor([-0.8760, -7.0042, -8.6602] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": __a = torch.tensor([-1.1986, -7.0903, -8.2718] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": __a = torch.tensor([-2.6128, -8.0080, -9.4344] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": __a = torch.tensor([-1.5080, -7.4534, -8.8917] ) elif model_name == "ast-finetuned-audioset-12-12-0.447": __a = torch.tensor([-0.5050, -6.5833, -8.0843] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": __a = torch.tensor([-0.3826, -7.0336, -8.2413] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": __a = torch.tensor([-1.2113, -6.9101, -8.3470] ) elif model_name == "ast-finetuned-speech-commands-v2": __a = 
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")

    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="ast-finetuned-audioset-10-10-0.4593",
        type=str,
        help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
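# Usage sketch (illustrative, not part of the script above). After running the
# conversion with --pytorch_dump_folder_path ./ast-converted, the result loads
# like any other 🤗 checkpoint; the folder name and the random dummy waveform
# below are assumptions made for this demo only.
import torch
from transformers import ASTFeatureExtractor, ASTForAudioClassification

converted_model = ASTForAudioClassification.from_pretrained("./ast-converted")
converted_extractor = ASTFeatureExtractor.from_pretrained("./ast-converted")
dummy_waveform = torch.randn(16000).numpy()  # one second of noise at 16 kHz
demo_inputs = converted_extractor(dummy_waveform, sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    predicted_id = converted_model(**demo_inputs).logits.argmax(-1).item()
print(converted_model.config.id2label[predicted_id])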
302
1
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
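# `good_file_paths` is imported from build_directory_md.py above. As a rough,
# illustrative stand-in (an approximation, not the canonical implementation), a
# generator with that contract yields repo-relative .py/.ipynb paths while
# skipping hidden directories and the scripts folder:
import os
from collections.abc import Iterator


def good_file_paths_sketch(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # prune hidden/underscore directories and the scripts folder in place
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")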
364
"""simple docstring""" import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class _UpperCAmelCase ( a ): '''simple docstring''' def __init__( self , A , A=1_3 , A=7 , A=True , A=True , A=False , A=True , A=9_9 , A=3_2 , A=5 , A=4 , A=3_7 , A="gelu" , A=0.1 , A=0.1 , A=5_1_2 , A=1_6 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> Dict: _UpperCAmelCase : Dict = parent _UpperCAmelCase : int = batch_size _UpperCAmelCase : Union[str, Any] = seq_length _UpperCAmelCase : str = is_training _UpperCAmelCase : Optional[Any] = use_input_mask _UpperCAmelCase : List[Any] = use_token_type_ids _UpperCAmelCase : str = use_labels _UpperCAmelCase : List[str] = vocab_size _UpperCAmelCase : str = hidden_size _UpperCAmelCase : List[str] = num_hidden_layers _UpperCAmelCase : List[str] = num_attention_heads _UpperCAmelCase : Union[str, Any] = intermediate_size _UpperCAmelCase : str = hidden_act _UpperCAmelCase : Optional[int] = hidden_dropout_prob _UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob _UpperCAmelCase : str = max_position_embeddings _UpperCAmelCase : Optional[int] = type_vocab_size _UpperCAmelCase : List[str] = type_sequence_label_size _UpperCAmelCase : Optional[Any] = initializer_range _UpperCAmelCase : Optional[int] = num_labels _UpperCAmelCase : int = num_choices _UpperCAmelCase : Dict = scope def __lowerCAmelCase ( self ) -> int: _UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase : int = None if self.use_input_mask: _UpperCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase : int = None _UpperCAmelCase : Optional[int] = None _UpperCAmelCase : Optional[Any] = None if self.use_labels: _UpperCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase : List[str] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self ) -> Union[str, Any]: return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def __lowerCAmelCase ( self , A , A , A , A , A , A ) -> Tuple: _UpperCAmelCase : int = DistilBertModel(config=A ) model.to(A ) model.eval() _UpperCAmelCase : Optional[int] = model(A , A ) _UpperCAmelCase : List[str] = model(A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( 
self , A , A , A , A , A , A ) -> Any: _UpperCAmelCase : int = DistilBertForMaskedLM(config=A ) model.to(A ) model.eval() _UpperCAmelCase : Any = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self , A , A , A , A , A , A ) -> List[Any]: _UpperCAmelCase : Tuple = DistilBertForQuestionAnswering(config=A ) model.to(A ) model.eval() _UpperCAmelCase : Optional[int] = model( A , attention_mask=A , start_positions=A , end_positions=A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self , A , A , A , A , A , A ) -> str: _UpperCAmelCase : List[Any] = self.num_labels _UpperCAmelCase : Union[str, Any] = DistilBertForSequenceClassification(A ) model.to(A ) model.eval() _UpperCAmelCase : int = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCAmelCase ( self , A , A , A , A , A , A ) -> List[Any]: _UpperCAmelCase : Any = self.num_labels _UpperCAmelCase : Optional[int] = DistilBertForTokenClassification(config=A ) model.to(A ) model.eval() _UpperCAmelCase : Optional[Any] = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self , A , A , A , A , A , A ) -> List[Any]: _UpperCAmelCase : str = self.num_choices _UpperCAmelCase : Dict = DistilBertForMultipleChoice(config=A ) model.to(A ) model.eval() _UpperCAmelCase : Tuple = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase : Optional[Any] = model( A , attention_mask=A , labels=A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCAmelCase ( self ) -> Optional[Any]: _UpperCAmelCase : Dict = self.prepare_config_and_inputs() ((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) : int = config_and_inputs _UpperCAmelCase : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class _UpperCAmelCase ( a ,a ,unittest.TestCase ): '''simple docstring''' a__ =( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) a__ =( { '''feature-extraction''': DistilBertModel, '''fill-mask''': DistilBertForMaskedLM, '''question-answering''': DistilBertForQuestionAnswering, '''text-classification''': DistilBertForSequenceClassification, '''token-classification''': DistilBertForTokenClassification, '''zero-shot''': DistilBertForSequenceClassification, } if is_torch_available() else {} ) a__ =True a__ =True a__ =True a__ =True def __lowerCAmelCase ( self ) -> Optional[Any]: _UpperCAmelCase : Union[str, Any] = DistilBertModelTester(self ) _UpperCAmelCase : List[Any] = ConfigTester(self , config_class=A , dim=3_7 ) def __lowerCAmelCase ( self ) -> Tuple: self.config_tester.run_common_tests() def __lowerCAmelCase ( self ) -> Optional[Any]: _UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_distilbert_model(*A ) def __lowerCAmelCase ( self ) -> Tuple: _UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*A ) def __lowerCAmelCase ( self ) -> Dict: _UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*A ) def __lowerCAmelCase ( self ) -> Any: _UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*A ) def __lowerCAmelCase ( self ) -> Tuple: _UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*A ) def __lowerCAmelCase ( self ) -> Optional[int]: _UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*A ) @slow def __lowerCAmelCase ( self ) -> Optional[Any]: for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase : Optional[int] = DistilBertModel.from_pretrained(A ) self.assertIsNotNone(A ) @slow @require_torch_gpu def __lowerCAmelCase ( self ) -> Optional[Any]: _UpperCAmelCase , _UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. if model_class == DistilBertForMultipleChoice: return _UpperCAmelCase : Tuple = True _UpperCAmelCase : Union[str, Any] = model_class(config=A ) _UpperCAmelCase : List[Any] = self._prepare_for_class(A , A ) _UpperCAmelCase : int = torch.jit.trace( A , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(A , os.path.join(A , '''traced_model.pt''' ) ) _UpperCAmelCase : Optional[int] = torch.jit.load(os.path.join(A , '''traced_model.pt''' ) , map_location=A ) loaded(inputs_dict['''input_ids'''].to(A ) , inputs_dict['''attention_mask'''].to(A ) ) @require_torch class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def __lowerCAmelCase ( self ) -> List[str]: _UpperCAmelCase : Dict = DistilBertModel.from_pretrained('''distilbert-base-uncased''' ) _UpperCAmelCase : List[str] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) _UpperCAmelCase : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): _UpperCAmelCase : str = model(A , attention_mask=A )[0] _UpperCAmelCase : Dict = torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , A ) _UpperCAmelCase : Any = torch.tensor( [[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , A , atol=1E-4 ) )
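# A standalone sketch (not part of the test suite above) of the core shape
# contract the model tester checks: a tiny randomly initialised DistilBERT,
# built with the same toy hyperparameters the tester uses, must return one
# hidden state per input token.
import torch
from transformers import DistilBertConfig, DistilBertModel

tiny_config = DistilBertConfig(vocab_size=99, dim=32, n_layers=5, n_heads=4, hidden_dim=37)
tiny_model = DistilBertModel(tiny_config).eval()
input_ids = torch.randint(0, tiny_config.vocab_size, (13, 7))  # (batch_size, seq_length)
with torch.no_grad():
    output = tiny_model(input_ids)
assert output.last_hidden_state.shape == (13, 7, tiny_config.dim)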
68
0
"""simple docstring""" import inspect import unittest import numpy as np from transformers import BeitConfig from transformers.testing_utils import require_flax, require_vision, slow from transformers.utils import cached_property, is_flax_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor if is_flax_available(): import jax from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class A_ ( unittest.TestCase ): """simple docstring""" def __init__( self :Optional[int] , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[Any]=100 , lowerCamelCase_ :List[str]=13 , lowerCamelCase_ :List[Any]=30 , lowerCamelCase_ :int=2 , lowerCamelCase_ :int=3 , lowerCamelCase_ :Union[str, Any]=True , lowerCamelCase_ :List[str]=True , lowerCamelCase_ :Any=32 , lowerCamelCase_ :List[str]=5 , lowerCamelCase_ :Tuple=4 , lowerCamelCase_ :Any=37 , lowerCamelCase_ :List[Any]="gelu" , lowerCamelCase_ :int=0.1 , lowerCamelCase_ :Dict=0.1 , lowerCamelCase_ :int=10 , lowerCamelCase_ :Dict=0.02 , lowerCamelCase_ :Dict=3 , ): """simple docstring""" lowerCamelCase__ : Union[str, Any] =parent lowerCamelCase__ : str =vocab_size lowerCamelCase__ : int =batch_size lowerCamelCase__ : Any =image_size lowerCamelCase__ : Any =patch_size lowerCamelCase__ : Optional[int] =num_channels lowerCamelCase__ : Any =is_training lowerCamelCase__ : List[str] =use_labels lowerCamelCase__ : Tuple =hidden_size lowerCamelCase__ : Dict =num_hidden_layers lowerCamelCase__ : str =num_attention_heads lowerCamelCase__ : List[Any] =intermediate_size lowerCamelCase__ : Any =hidden_act lowerCamelCase__ : Optional[int] =hidden_dropout_prob lowerCamelCase__ : Optional[Any] =attention_probs_dropout_prob lowerCamelCase__ : List[str] =type_sequence_label_size lowerCamelCase__ : List[Any] =initializer_range # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) lowerCamelCase__ : List[str] =(image_size // patch_size) ** 2 lowerCamelCase__ : Union[str, Any] =num_patches + 1 def UpperCAmelCase__ ( self :Union[str, Any] ): """simple docstring""" lowerCamelCase__ : int =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ : List[Any] =None if self.use_labels: lowerCamelCase__ : List[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : Union[str, Any] =BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE__ , initializer_range=self.initializer_range , ) return config, pixel_values, labels def UpperCAmelCase__ ( self :List[Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Union[str, Any] ): """simple docstring""" lowerCamelCase__ : List[Any] =FlaxBeitModel(config=SCREAMING_SNAKE_CASE__ ) lowerCamelCase__ : int =model(SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) 
def UpperCAmelCase__ ( self :Any , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :str , lowerCamelCase_ :List[str] ): """simple docstring""" lowerCamelCase__ : List[Any] =FlaxBeitForMaskedImageModeling(config=SCREAMING_SNAKE_CASE__ ) lowerCamelCase__ : Optional[Any] =model(SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def UpperCAmelCase__ ( self :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int] ): """simple docstring""" lowerCamelCase__ : List[Any] =self.type_sequence_label_size lowerCamelCase__ : List[str] =FlaxBeitForImageClassification(config=SCREAMING_SNAKE_CASE__ ) lowerCamelCase__ : Any =model(SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCamelCase__ : Optional[int] =1 lowerCamelCase__ : str =FlaxBeitForImageClassification(SCREAMING_SNAKE_CASE__ ) lowerCamelCase__ : Optional[int] =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase__ : List[Any] =model(SCREAMING_SNAKE_CASE__ ) def UpperCAmelCase__ ( self :Tuple ): """simple docstring""" lowerCamelCase__ : int =self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) : List[Any] =config_and_inputs lowerCamelCase__ : Optional[Any] ={'pixel_values': pixel_values} return config, inputs_dict @require_flax class A_ ( __lowercase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ = ( (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else () ) def UpperCAmelCase__ ( self :int ): """simple docstring""" lowerCamelCase__ : Optional[Any] =FlaxBeitModelTester(self ) lowerCamelCase__ : int =ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ , hidden_size=37 ) def UpperCAmelCase__ ( self :Optional[int] ): """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self :List[Any] ): """simple docstring""" lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Optional[int] =model_class(SCREAMING_SNAKE_CASE__ ) lowerCamelCase__ : Tuple =inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ : Optional[Any] =[*signature.parameters.keys()] lowerCamelCase__ : Dict =['pixel_values'] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ ) def UpperCAmelCase__ ( self :Union[str, Any] ): """simple docstring""" lowerCamelCase__ , lowerCamelCase__ : int =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCamelCase__ : List[Any] =self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowerCamelCase__ : int =model_class(SCREAMING_SNAKE_CASE__ ) @jax.jit def model_jitted(lowerCamelCase_ :str , **lowerCamelCase_ :Dict ): return model(pixel_values=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) with self.subTest('JIT Enabled' ): lowerCamelCase__ : Optional[Any] =model_jitted(**SCREAMING_SNAKE_CASE__ ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): lowerCamelCase__ : Any =model_jitted(**SCREAMING_SNAKE_CASE__ ).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 
len(SCREAMING_SNAKE_CASE__ ) ) for jitted_output, output in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): self.assertEqual(jitted_output.shape , output.shape ) def UpperCAmelCase__ ( self :Any ): """simple docstring""" lowerCamelCase__ : List[str] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ ) def UpperCAmelCase__ ( self :Any ): """simple docstring""" lowerCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE__ ) def UpperCAmelCase__ ( self :Optional[int] ): """simple docstring""" lowerCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__ ) @slow def UpperCAmelCase__ ( self :List[Any] ): """simple docstring""" for model_class_name in self.all_model_classes: lowerCamelCase__ : int =model_class_name.from_pretrained('microsoft/beit-base-patch16-224' ) lowerCamelCase__ : int =model(np.ones((1, 3, 224, 224) ) ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) def lowerCAmelCase_ ( ) ->List[str]: lowerCamelCase__ : Dict =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_vision @require_flax class A_ ( unittest.TestCase ): """simple docstring""" @cached_property def UpperCAmelCase__ ( self :List[str] ): """simple docstring""" return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None @slow def UpperCAmelCase__ ( self :int ): """simple docstring""" lowerCamelCase__ : List[str] =FlaxBeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ) lowerCamelCase__ : List[str] =self.default_image_processor lowerCamelCase__ : List[str] =prepare_img() lowerCamelCase__ : Tuple =image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='np' ).pixel_values # prepare bool_masked_pos lowerCamelCase__ : Optional[int] =np.ones((1, 196) , dtype=SCREAMING_SNAKE_CASE__ ) # forward pass lowerCamelCase__ : int =model(pixel_values=SCREAMING_SNAKE_CASE__ , bool_masked_pos=SCREAMING_SNAKE_CASE__ ) lowerCamelCase__ : Union[str, Any] =outputs.logits # verify the logits lowerCamelCase__ : int =(1, 196, 8_192) self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE__ ) lowerCamelCase__ : str =np.array( [[-3.24_37, 0.50_72, -13.91_74], [-3.24_56, 0.49_48, -13.94_01], [-3.20_33, 0.51_21, -13.85_50]] ) self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-2 ) ) @slow def UpperCAmelCase__ ( self :int ): """simple docstring""" lowerCamelCase__ : Dict =FlaxBeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ) lowerCamelCase__ : int =self.default_image_processor lowerCamelCase__ : Any =prepare_img() lowerCamelCase__ : Union[str, Any] =image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='np' ) # forward pass lowerCamelCase__ : Dict =model(**SCREAMING_SNAKE_CASE__ ) lowerCamelCase__ : Tuple =outputs.logits # verify the logits lowerCamelCase__ : Dict =(1, 1_000) self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE__ ) lowerCamelCase__ : int =np.array([-1.23_85, -1.09_87, -1.01_08] ) self.assertTrue(np.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) ) lowerCamelCase__ : Any =281 self.assertEqual(logits.argmax(-1 ).item() , SCREAMING_SNAKE_CASE__ ) @slow def UpperCAmelCase__ ( self :Dict ): """simple docstring""" lowerCamelCase__ : Union[str, Any] 
=FlaxBeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ) lowerCamelCase__ : List[str] =self.default_image_processor lowerCamelCase__ : int =prepare_img() lowerCamelCase__ : Tuple =image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='np' ) # forward pass lowerCamelCase__ : List[str] =model(**SCREAMING_SNAKE_CASE__ ) lowerCamelCase__ : List[Any] =outputs.logits # verify the logits lowerCamelCase__ : str =(1, 21_841) self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE__ ) lowerCamelCase__ : Dict =np.array([1.68_81, -0.27_87, 0.59_01] ) self.assertTrue(np.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) ) lowerCamelCase__ : Optional[int] =2_396 self.assertEqual(logits.argmax(-1 ).item() , SCREAMING_SNAKE_CASE__ )
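# A standalone sketch (not part of the test suite above) of what the Flax tester
# verifies: with the tester's toy sizes, a randomly initialised FlaxBeitModel
# yields (num_patches + 1) hidden states per image, the +1 being the prepended
# [CLS] token. Pixel values are passed channels-first, as in the tests.
import numpy as np
from transformers import BeitConfig, FlaxBeitModel

tiny_config = BeitConfig(
    image_size=30,
    patch_size=2,
    num_channels=3,
    hidden_size=32,
    num_hidden_layers=5,
    num_attention_heads=4,
    intermediate_size=37,
)
tiny_model = FlaxBeitModel(tiny_config)
pixel_values = np.zeros((13, 3, 30, 30), dtype=np.float32)  # (batch, channels, H, W)
outputs = tiny_model(pixel_values)
num_patches = (30 // 2) ** 2  # 225 patches, +1 for [CLS] -> 226 positions
assert outputs.last_hidden_state.shape == (13, num_patches + 1, 32)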
126
import pprint

import requests

API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
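# Each entry of the returned list is a dict; at the time of writing the ZenQuotes
# API stores the quote text under "q" and the author under "a" (verify against a
# live response before relying on this). A formatted one-liner, relying on
# random_quotes above (network access required):
quote = random_quotes()[0]
print(f"\"{quote['q']}\" - {quote['a']}")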
270
0
"""simple docstring""" from __future__ import annotations import math def A_ ( _lowerCAmelCase : int ): """simple docstring""" if num <= 0: _a = f'{num}: Invalid input, please enter a positive integer.' raise ValueError(lowerCAmelCase__ ) _a = [True] * (num + 1) _a = [] _a = 2 _a = int(math.sqrt(lowerCAmelCase__ ) ) while start <= end: # If start is a prime if sieve[start] is True: prime.append(lowerCAmelCase__ ) # Set multiples of start be False for i in range(start * start, num + 1, lowerCAmelCase__ ): if sieve[i] is True: _a = False start += 1 for j in range(end + 1, num + 1 ): if sieve[j] is True: prime.append(lowerCAmelCase__ ) return prime if __name__ == "__main__": print(prime_sieve(int(input('''Enter a positive integer: ''').strip())))
360
"""simple docstring""" def A_ ( _lowerCAmelCase : int ): """simple docstring""" if p < 2: raise ValueError('''p should not be less than 2!''' ) elif p == 2: return True _a = 4 _a = (1 << p) - 1 for _ in range(p - 2 ): _a = ((s * s) - 2) % m return s == 0 if __name__ == "__main__": print(lucas_lehmer_test(7)) print(lucas_lehmer_test(11))
153
0
import os
import sys

SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)


from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)


dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
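# This file has the shape of a torch.hub entry-point module (hubconf.py):
# torch.hub reads `dependencies` and exposes each top-level function by name.
# Illustrative usage (network access required; repo name per the upstream
# project, verify before relying on it):
#
#   import torch
#   tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
#   bert = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")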
298
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class A : '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase=1_3 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=9_9 , _UpperCAmelCase=3_2 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=3_7 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=5_1_2 , _UpperCAmelCase=1_6 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ) -> Dict: __UpperCamelCase : Optional[Any] = parent __UpperCamelCase : List[str] = 1_3 __UpperCamelCase : List[Any] = 7 __UpperCamelCase : List[str] = True __UpperCamelCase : Optional[Any] = True __UpperCamelCase : Tuple = True __UpperCamelCase : str = True __UpperCamelCase : List[Any] = 9_9 __UpperCamelCase : Union[str, Any] = 3_8_4 __UpperCamelCase : str = 2 __UpperCamelCase : Optional[Any] = 4 __UpperCamelCase : Any = 3_7 __UpperCamelCase : str = "gelu" __UpperCamelCase : Optional[Any] = 0.1 __UpperCamelCase : str = 0.1 __UpperCamelCase : str = 5_1_2 __UpperCamelCase : Optional[Any] = 1_6 __UpperCamelCase : Dict = 2 __UpperCamelCase : Optional[int] = 0.02 __UpperCamelCase : List[Any] = 3 __UpperCamelCase : Optional[Any] = 4 __UpperCamelCase : int = 1_2_8 __UpperCamelCase : Tuple = 2 __UpperCamelCase : str = 9 __UpperCamelCase : List[Any] = 1 __UpperCamelCase : Any = None def a_ (self ) -> int: __UpperCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase : str = None if self.use_input_mask: __UpperCamelCase : str = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCamelCase : int = None if self.use_token_type_ids: __UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCamelCase : List[Any] = None __UpperCamelCase : Union[str, Any] = None __UpperCamelCase : Optional[Any] = None if self.use_labels: __UpperCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCamelCase : Tuple = ids_tensor([self.batch_size] , self.num_choices ) __UpperCamelCase : str = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def a_ (self , 
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Dict: __UpperCamelCase : Tuple = TFConvBertModel(config=_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} __UpperCamelCase : Optional[Any] = [input_ids, input_mask] __UpperCamelCase : str = model(_UpperCAmelCase ) __UpperCamelCase : int = model(_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]: __UpperCamelCase : int = TFConvBertForMaskedLM(config=_UpperCAmelCase ) __UpperCamelCase : Dict = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } __UpperCamelCase : List[str] = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]: __UpperCamelCase : Union[str, Any] = self.num_labels __UpperCamelCase : Optional[Any] = TFConvBertForSequenceClassification(config=_UpperCAmelCase ) __UpperCamelCase : List[str] = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } __UpperCamelCase : Optional[Any] = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]: __UpperCamelCase : Optional[int] = self.num_choices __UpperCamelCase : List[Any] = TFConvBertForMultipleChoice(config=_UpperCAmelCase ) __UpperCamelCase : Optional[int] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCamelCase : Optional[Any] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCamelCase : str = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCamelCase : List[str] = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } __UpperCamelCase : int = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any: __UpperCamelCase : List[str] = self.num_labels __UpperCamelCase : Tuple = TFConvBertForTokenClassification(config=_UpperCAmelCase ) __UpperCamelCase : Dict = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } __UpperCamelCase : Union[str, Any] = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]: __UpperCamelCase : int = TFConvBertForQuestionAnswering(config=_UpperCAmelCase ) __UpperCamelCase : Dict = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } __UpperCamelCase : Any = model(_UpperCAmelCase ) 
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def a_ (self ) -> str: __UpperCamelCase : str = self.prepare_config_and_inputs() ( ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ) : Any = config_and_inputs __UpperCamelCase : int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): '''simple docstring''' A = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) A = ( { "feature-extraction": TFConvBertModel, "fill-mask": TFConvBertForMaskedLM, "question-answering": TFConvBertForQuestionAnswering, "text-classification": TFConvBertForSequenceClassification, "token-classification": TFConvBertForTokenClassification, "zero-shot": TFConvBertForSequenceClassification, } if is_tf_available() else {} ) A = False A = False A = False def a_ (self ) -> Optional[int]: __UpperCamelCase : Tuple = TFConvBertModelTester(self ) __UpperCamelCase : Optional[Any] = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=3_7 ) def a_ (self ) -> Dict: self.config_tester.run_common_tests() def a_ (self ) -> Dict: __UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def a_ (self ) -> Tuple: __UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase ) def a_ (self ) -> Tuple: __UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase ) def a_ (self ) -> Dict: __UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase ) def a_ (self ) -> Dict: __UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase ) def a_ (self ) -> Optional[int]: __UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase ) @slow def a_ (self ) -> Any: __UpperCamelCase , __UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() __UpperCamelCase : str = True __UpperCamelCase : int = True if hasattr(_UpperCAmelCase , "use_cache" ): __UpperCamelCase : List[Any] = True __UpperCamelCase : List[str] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length ) __UpperCamelCase : Optional[Any] = getattr(self.model_tester , "key_length" , _UpperCAmelCase ) for model_class in self.all_model_classes: __UpperCamelCase : Any = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) __UpperCamelCase : int = model_class(_UpperCAmelCase ) __UpperCamelCase : Any = len(model(_UpperCAmelCase ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase ) __UpperCamelCase : List[str] = os.path.join(_UpperCAmelCase , "saved_model" , "1" ) __UpperCamelCase : List[str] = 
tf.keras.models.load_model(_UpperCAmelCase ) __UpperCamelCase : Dict = model(_UpperCAmelCase ) if self.is_encoder_decoder: __UpperCamelCase : Any = outputs["encoder_hidden_states"] __UpperCamelCase : Tuple = outputs["encoder_attentions"] else: __UpperCamelCase : Tuple = outputs["hidden_states"] __UpperCamelCase : Optional[int] = outputs["attentions"] self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase ) __UpperCamelCase : Any = getattr( self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def a_ (self ) -> Optional[Any]: __UpperCamelCase : Tuple = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) self.assertIsNotNone(_UpperCAmelCase ) def a_ (self ) -> Tuple: __UpperCamelCase , __UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __UpperCamelCase : str = True __UpperCamelCase : Tuple = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length ) __UpperCamelCase : Optional[int] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length ) __UpperCamelCase : Any = getattr(self.model_tester , "key_length" , _UpperCAmelCase ) __UpperCamelCase : List[Any] = getattr(self.model_tester , "key_length" , _UpperCAmelCase ) def check_decoder_attentions_output(_UpperCAmelCase ): __UpperCamelCase : Dict = len(_UpperCAmelCase ) self.assertEqual(out_len % 2 , 0 ) __UpperCamelCase : List[str] = outputs.decoder_attentions self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(_UpperCAmelCase ): __UpperCamelCase : Any = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: __UpperCamelCase : Any = True __UpperCamelCase : Dict = False __UpperCamelCase : str = model_class(_UpperCAmelCase ) __UpperCamelCase : Tuple = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) __UpperCamelCase : List[Any] = len(_UpperCAmelCase ) self.assertEqual(config.output_hidden_states , _UpperCAmelCase ) check_encoder_attentions_output(_UpperCAmelCase ) if self.is_encoder_decoder: __UpperCamelCase : str = model_class(_UpperCAmelCase ) __UpperCamelCase : Dict = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertEqual(config.output_hidden_states , _UpperCAmelCase ) check_decoder_attentions_output(_UpperCAmelCase ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] __UpperCamelCase : Optional[Any] = True __UpperCamelCase : Tuple = model_class(_UpperCAmelCase ) __UpperCamelCase : int = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) 
self.assertEqual(config.output_hidden_states , _UpperCAmelCase ) check_encoder_attentions_output(_UpperCAmelCase ) # Check attention is always last and order is fine __UpperCamelCase : int = True __UpperCamelCase : str = True __UpperCamelCase : Optional[Any] = model_class(_UpperCAmelCase ) __UpperCamelCase : Optional[int] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase ) ) self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase ) check_encoder_attentions_output(_UpperCAmelCase ) @require_tf class A ( unittest.TestCase ): '''simple docstring''' @slow def a_ (self ) -> str: __UpperCamelCase : Dict = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) __UpperCamelCase : str = tf.constant([[0, 1, 2, 3, 4, 5]] ) __UpperCamelCase : Optional[int] = model(_UpperCAmelCase )[0] __UpperCamelCase : Tuple = [1, 6, 7_6_8] self.assertEqual(output.shape , _UpperCAmelCase ) __UpperCamelCase : Any = tf.constant( [ [ [-0.03_475_493, -0.4_686_034, -0.30_638_832], [0.22_637_248, -0.26_988_646, -0.7_423_424], [0.10_324_868, -0.45_013_508, -0.58_280_784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1E-4 )
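# Standalone usage sketch mirroring the integration test above (downloads the
# YituTech/conv-bert-base weights; the tokenizer choice is an assumption made
# for this demo, not taken from the test file):
import tensorflow as tf
from transformers import AutoTokenizer, TFConvBertModel

convbert = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
convbert_tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
encoded = convbert_tokenizer("ConvBERT mixes self-attention with span-based convolution.", return_tensors="tf")
hidden = convbert(**encoded).last_hidden_state
print(hidden.shape)  # (1, seq_len, 768)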
298
1
from __future__ import annotations

from math import sqrt

import numpy as np
from sympy import symbols

# Coefficient
# Speed of light (m/s)
c = 299792458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")

    return velocity / c


def gamma(velocity: float) -> float:
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray | None = None) -> np.ndarray:
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)

    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29979245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
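# Worked check (standalone, illustrative): at v = c/2 the Lorentz factor is
# gamma = 1 / sqrt(1 - beta**2) = 1 / sqrt(3/4) = 2 / sqrt(3) ≈ 1.1547.
from math import isclose, sqrt

speed_of_light = 299792458
v = speed_of_light / 2
beta_half = v / speed_of_light  # 0.5
gamma_half = 1 / sqrt(1 - beta_half**2)
assert isclose(gamma_half, 2 / sqrt(3))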
15
from math import ceil


def solution(n: int = 1001) -> int:
    """Return the sum of the numbers on the diagonals of an n x n number spiral."""
    total = 1

    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        # each ring contributes four corners summing to 4*odd**2 - 6*even
        total = total + 4 * odd**2 - 6 * even

    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
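# Cross-check (relies on solution above): walk the spiral's diagonals directly
# and compare against the closed-form ring sum.
def diagonal_sum_bruteforce(n: int) -> int:
    total, value, step = 1, 1, 2
    while step < n:
        for _ in range(4):  # four corners per ring
            value += step
            total += value
        step += 2
    return total


assert solution(5) == diagonal_sum_bruteforce(5) == 101
assert solution(1001) == diagonal_sum_bruteforce(1001)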
15
1
import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : List[str] = """char""" lowerCamelCase_ : Union[str, Any] = """bpe""" lowerCamelCase_ : Optional[Any] = """wp""" SCREAMING_SNAKE_CASE__ : Tuple = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Optional[Any] = ["""image_processor""", """char_tokenizer"""] lowerCamelCase_ : Optional[int] = """ViTImageProcessor""" lowerCamelCase_ : List[Any] = """MgpstrTokenizer""" def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> Optional[int]: lowerCamelCase : int = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , UpperCamelCase__ , ) lowerCamelCase : List[str] = kwargs.pop("feature_extractor" ) lowerCamelCase : List[Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) lowerCamelCase : Any = tokenizer lowerCamelCase : str = AutoTokenizer.from_pretrained("gpt2" ) lowerCamelCase : Tuple = AutoTokenizer.from_pretrained("bert-base-uncased" ) super().__init__(UpperCamelCase__ , UpperCamelCase__ ) def __call__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> Optional[int]: if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process." 
) if images is not None: lowerCamelCase : Optional[int] = self.image_processor(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ ) if text is not None: lowerCamelCase : Tuple = self.char_tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ ) if text is None: return inputs elif images is None: return encodings else: lowerCamelCase : Dict = encodings["input_ids"] return inputs def _lowercase ( self , UpperCamelCase__ ) -> str: lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[Any] = sequences lowerCamelCase : Any = char_preds.size(0 ) lowerCamelCase , lowerCamelCase : Any = self._decode_helper(UpperCamelCase__ , "char" ) lowerCamelCase , lowerCamelCase : List[Any] = self._decode_helper(UpperCamelCase__ , "bpe" ) lowerCamelCase , lowerCamelCase : Tuple = self._decode_helper(UpperCamelCase__ , "wp" ) lowerCamelCase : Union[str, Any] = [] lowerCamelCase : str = [] for i in range(UpperCamelCase__ ): lowerCamelCase : List[str] = [char_scores[i], bpe_scores[i], wp_scores[i]] lowerCamelCase : str = [char_strs[i], bpe_strs[i], wp_strs[i]] lowerCamelCase : Optional[int] = scores.index(max(UpperCamelCase__ ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) lowerCamelCase : Optional[Any] = {} lowerCamelCase : List[str] = final_strs lowerCamelCase : str = final_scores lowerCamelCase : Optional[int] = char_strs lowerCamelCase : Union[str, Any] = bpe_strs lowerCamelCase : Tuple = wp_strs return out def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Dict: if format == DecodeType.CHARACTER: lowerCamelCase : Union[str, Any] = self.char_decode lowerCamelCase : List[Any] = 1 lowerCamelCase : Union[str, Any] = "[s]" elif format == DecodeType.BPE: lowerCamelCase : Dict = self.bpe_decode lowerCamelCase : List[str] = 2 lowerCamelCase : List[Any] = "#" elif format == DecodeType.WORDPIECE: lowerCamelCase : Any = self.wp_decode lowerCamelCase : List[str] = 102 lowerCamelCase : Any = "[SEP]" else: raise ValueError(F'''Format {format} is not supported.''' ) lowerCamelCase , lowerCamelCase : List[str] = [], [] lowerCamelCase : List[Any] = pred_logits.size(0 ) lowerCamelCase : Union[str, Any] = pred_logits.size(1 ) lowerCamelCase , lowerCamelCase : Optional[int] = pred_logits.topk(1 , dim=-1 , largest=UpperCamelCase__ , sorted=UpperCamelCase__ ) lowerCamelCase : Tuple = preds_index.view(-1 , UpperCamelCase__ )[:, 1:] lowerCamelCase : str = decoder(UpperCamelCase__ ) lowerCamelCase , lowerCamelCase : str = torch.nn.functional.softmax(UpperCamelCase__ , dim=2 ).max(dim=2 ) lowerCamelCase : Optional[int] = preds_max_prob[:, 1:] for index in range(UpperCamelCase__ ): lowerCamelCase : List[str] = preds_str[index].find(UpperCamelCase__ ) lowerCamelCase : int = preds_str[index][:pred_eos] lowerCamelCase : Optional[Any] = preds_index[index].cpu().tolist() lowerCamelCase : List[Any] = pred_index.index(UpperCamelCase__ ) if eos_token in pred_index else -1 lowerCamelCase : Optional[int] = preds_max_prob[index][: pred_eos_index + 1] lowerCamelCase : Union[str, Any] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(UpperCamelCase__ ) conf_scores.append(UpperCamelCase__ ) return dec_strs, conf_scores def _lowercase ( self , UpperCamelCase__ ) -> Tuple: lowerCamelCase : Optional[int] = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(UpperCamelCase__ )] return decode_strs def _lowercase ( self , UpperCamelCase__ ) -> str: return 
self.bpe_tokenizer.batch_decode(UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ ) -> List[Any]: lowerCamelCase : Tuple = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(UpperCamelCase__ )] return decode_strs
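# End-to-end usage sketch of the processor above, following the published
# MGP-STR example (downloads weights; the image URL is a placeholder you must
# substitute, and the snippet should be verified against the model card):
#
#   from PIL import Image
#   import requests
#   from transformers import MgpstrProcessor, MgpstrForSceneTextRecognition
#
#   processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#   model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
#   image = Image.open(requests.get("<image-url>", stream=True).raw).convert("RGB")
#   pixel_values = processor(images=image, return_tensors="pt").pixel_values
#   outputs = model(pixel_values)
#   text = processor.batch_decode(outputs.logits)["generated_text"]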
48
def matching_min_vertex_cover(graph: dict) -> set:
    """
    Approximate minimum vertex cover obtained from a maximal matching.
    """
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # edges = set of graph's edges
    edges = get_edges(graph)

    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add both of its endpoints to chosen_vertices and
    # then discard every edge adjacent to from_node or to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """
    Return a set of couples that represents all of the edges.
    """
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
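# Example run (relies on the two functions above), using the graph from the
# commented-out demo. A vertex cover must touch every edge at least once.
graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
cover = matching_min_vertex_cover(graph)
assert all(u in cover or v in cover for u, v in get_edges(graph))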
151
0
import logging import os import threading import time try: import warnings except ImportError: UpperCamelCase__ : List[Any] = None try: import msvcrt except ImportError: UpperCamelCase__ : Optional[Any] = None try: import fcntl except ImportError: UpperCamelCase__ : Optional[Any] = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: UpperCamelCase__ : List[str] = OSError # Data # ------------------------------------------------ UpperCamelCase__ : str = [ """Timeout""", """BaseFileLock""", """WindowsFileLock""", """UnixFileLock""", """SoftFileLock""", """FileLock""", ] UpperCamelCase__ : str = """3.0.12""" UpperCamelCase__ : Optional[Any] = None def SCREAMING_SNAKE_CASE__ ( ) -> int: """simple docstring""" global _logger a = _logger or logging.getLogger(__name__ ) return _logger class lowerCamelCase_ ( a_ ): def __init__( self : Optional[Any] ,__lowerCamelCase : Union[str, Any] ): '''simple docstring''' a = lock_file return None def __str__( self : List[Any] ): '''simple docstring''' a = F"""The file lock '{self.lock_file}' could not be acquired.""" return temp class lowerCamelCase_ : def __init__( self : Dict ,__lowerCamelCase : Tuple ): '''simple docstring''' a = lock return None def __enter__( self : int ): '''simple docstring''' return self.lock def __exit__( self : Dict ,__lowerCamelCase : Union[str, Any] ,__lowerCamelCase : Optional[int] ,__lowerCamelCase : List[Any] ): '''simple docstring''' self.lock.release() return None class lowerCamelCase_ : def __init__( self : str ,__lowerCamelCase : List[str] ,__lowerCamelCase : Union[str, Any]=-1 ,__lowerCamelCase : Optional[int]=None ): '''simple docstring''' a = max_filename_length if max_filename_length is not None else 2_55 # Hash the filename if it's too long a = self.hash_filename_if_too_long(__lowerCamelCase ,__lowerCamelCase ) # The path to the lock file. a = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. a = None # The default timeout value. a = timeout # We use this lock primarily for the lock counter. a = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. a = 0 return None @property def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): '''simple docstring''' return self._lock_file @property def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): '''simple docstring''' return self._timeout @timeout.setter def SCREAMING_SNAKE_CASE_ ( self : int ,__lowerCamelCase : Optional[Any] ): '''simple docstring''' a = float(__lowerCamelCase ) return None def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): '''simple docstring''' raise NotImplementedError() def SCREAMING_SNAKE_CASE_ ( self : Tuple ): '''simple docstring''' raise NotImplementedError() @property def SCREAMING_SNAKE_CASE_ ( self : Tuple ): '''simple docstring''' return self._lock_file_fd is not None def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : Dict=None ,__lowerCamelCase : str=0.05 ): '''simple docstring''' if timeout is None: a = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 a = id(self ) a = self._lock_file a = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(F"""Attempting to acquire lock {lock_id} on {lock_filename}""" ) self._acquire() if self.is_locked: logger().debug(F"""Lock {lock_id} acquired on {lock_filename}""" ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(F"""Timeout on acquiring lock {lock_id} on {lock_filename}""" ) raise Timeout(self._lock_file ) else: logger().debug( F"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" ) time.sleep(__lowerCamelCase ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: a = max(0 ,self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : int=False ): '''simple docstring''' with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: a = id(self ) a = self._lock_file logger().debug(F"""Attempting to release lock {lock_id} on {lock_filename}""" ) self._release() a = 0 logger().debug(F"""Lock {lock_id} released on {lock_filename}""" ) return None def __enter__( self : Union[str, Any] ): '''simple docstring''' self.acquire() return self def __exit__( self : Optional[int] ,__lowerCamelCase : Union[str, Any] ,__lowerCamelCase : Union[str, Any] ,__lowerCamelCase : Union[str, Any] ): '''simple docstring''' self.release() return None def __del__( self : Dict ): '''simple docstring''' self.release(force=__lowerCamelCase ) return None def SCREAMING_SNAKE_CASE_ ( self : List[Any] ,__lowerCamelCase : str ,__lowerCamelCase : int ): '''simple docstring''' a = os.path.basename(__lowerCamelCase ) if len(__lowerCamelCase ) > max_length and max_length > 0: a = os.path.dirname(__lowerCamelCase ) a = str(hash(__lowerCamelCase ) ) a = filename[: max_length - len(__lowerCamelCase ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(__lowerCamelCase ,__lowerCamelCase ) else: return path class lowerCamelCase_ ( a_ ): def __init__( self : Dict ,__lowerCamelCase : str ,__lowerCamelCase : Any=-1 ,__lowerCamelCase : Tuple=None ): '''simple docstring''' from .file_utils import relative_to_absolute_path super().__init__(__lowerCamelCase ,timeout=__lowerCamelCase ,max_filename_length=__lowerCamelCase ) a = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' a = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: a = os.open(self._lock_file ,__lowerCamelCase ) except OSError: pass else: try: msvcrt.locking(__lowerCamelCase ,msvcrt.LK_NBLCK ,1 ) except OSError: os.close(__lowerCamelCase ) else: a = fd return None def SCREAMING_SNAKE_CASE_ ( self : List[str] ): '''simple docstring''' a = self._lock_file_fd a = None msvcrt.locking(__lowerCamelCase ,msvcrt.LK_UNLCK ,1 ) os.close(__lowerCamelCase ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class lowerCamelCase_ ( a_ ): def __init__( self : Dict ,__lowerCamelCase : str ,__lowerCamelCase : Tuple=-1 ,__lowerCamelCase : Tuple=None ): '''simple docstring''' a = os.statvfs(os.path.dirname(__lowerCamelCase ) ).f_namemax super().__init__(__lowerCamelCase ,timeout=__lowerCamelCase ,max_filename_length=__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): '''simple docstring''' a = os.O_RDWR | os.O_CREAT | os.O_TRUNC a = os.open(self._lock_file ,__lowerCamelCase ) try: fcntl.flock(__lowerCamelCase ,fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(__lowerCamelCase ) else: a = fd return None def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' a = self._lock_file_fd a = None fcntl.flock(__lowerCamelCase ,fcntl.LOCK_UN ) os.close(__lowerCamelCase ) return None class lowerCamelCase_ ( a_ ): def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): '''simple docstring''' a = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: a = os.open(self._lock_file ,__lowerCamelCase ) except OSError: pass else: a = fd return None def SCREAMING_SNAKE_CASE_ ( self : List[str] ): '''simple docstring''' os.close(self._lock_file_fd ) a = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None UpperCamelCase__ : Any = None if msvcrt: UpperCamelCase__ : Tuple = WindowsFileLock elif fcntl: UpperCamelCase__ : Any = UnixFileLock else: UpperCamelCase__ : Union[str, Any] = SoftFileLock if warnings is not None: warnings.warn("""only soft file lock is available""")
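# ---------------------------------------------------------------------------
# Editor's usage sketch -- not part of the original module. The class names
# above are obfuscated, so this example targets the equivalent public API of
# the upstream `filelock` package that the module mirrors; the file paths are
# illustrative assumptions.
from filelock import FileLock, Timeout

lock = FileLock("shared_resource.txt.lock", timeout=1)
try:
    with lock:  # the context manager calls acquire()/release() for us
        with open("shared_resource.txt", "a") as f:
            f.write("guarded write\n")
except Timeout:
    print("Another process currently holds the lock.")
# ---------------------------------------------------------------------------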
# code_codestyle: 330
import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration UpperCamelCase__ : Any = [ # tf -> hf ("""/""", """."""), ("""layer_""", """layers."""), ("""kernel""", """weight"""), ("""beta""", """bias"""), ("""gamma""", """weight"""), ("""pegasus""", """model"""), ] UpperCamelCase__ : Optional[Any] = [ (""".output.dense""", """.fc2"""), ("""intermediate.LayerNorm""", """final_layer_norm"""), ("""intermediate.dense""", """fc1"""), ] UpperCamelCase__ : Optional[Any] = ( INIT_COMMON + [ ("""attention.self.LayerNorm""", """self_attn_layer_norm"""), ("""attention.output.dense""", """self_attn.out_proj"""), ("""attention.self""", """self_attn"""), ("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""), ("""attention.encdec_output.dense""", """encoder_attn.out_proj"""), ("""attention.encdec""", """encoder_attn"""), ("""key""", """k_proj"""), ("""value""", """v_proj"""), ("""query""", """q_proj"""), ("""decoder.LayerNorm""", """decoder.layernorm_embedding"""), ] + END_COMMON ) UpperCamelCase__ : List[str] = ( INIT_COMMON + [ ("""embeddings.word_embeddings""", """shared.weight"""), ("""embeddings.position_embeddings""", """embed_positions.weight"""), ("""attention.self.LayerNorm""", """self_attn_layer_norm"""), ("""attention.output.dense""", """self_attn.output"""), ("""attention.self""", """self_attn.self"""), ("""encoder.LayerNorm""", """encoder.layernorm_embedding"""), ] + END_COMMON ) UpperCamelCase__ : Optional[int] = [ """encdec/key/bias""", """encdec/query/bias""", """encdec/value/bias""", """self/key/bias""", """self/query/bias""", """self/value/bias""", """encdec_output/dense/bias""", """attention/output/dense/bias""", ] def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> List[Any]: """simple docstring""" for tf_name, hf_name in patterns: a = k.replace(snake_case_, snake_case_ ) return k def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> BigBirdPegasusForConditionalGeneration: """simple docstring""" a = BigBirdPegasusConfig(**snake_case_ ) a = BigBirdPegasusForConditionalGeneration(snake_case_ ) a = torch_model.state_dict() a = {} # separating decoder weights a = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )} a = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )} for k, v in tqdm(decoder_weights.items(), '''tf -> hf conversion''' ): a = [k.endswith(snake_case_ ) for ending in KEYS_TO_IGNORE] if any(snake_case_ ): continue a = DECODER_PATTERNS a = rename_state_dict_key(snake_case_, snake_case_ ) if new_k not in state_dict: raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): a = v.T a = torch.from_numpy(snake_case_ ) assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" for k, v in tqdm(remaining_weights.items(), '''tf -> hf conversion''' ): a = [k.endswith(snake_case_ ) for ending in KEYS_TO_IGNORE] if any(snake_case_ ): continue a = REMAINING_PATTERNS a = rename_state_dict_key(snake_case_, snake_case_ ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(f"""could not find new key {new_k} in state dict. 
(converted from {k})""" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): a = v.T a = torch.from_numpy(snake_case_ ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" a = mapping['''model.embed_positions.weight'''] a = mapping.pop('''model.embed_positions.weight''' ) a , a = torch_model.load_state_dict(snake_case_, strict=snake_case_ ) a = [ k for k in missing if k not in [ '''final_logits_bias''', '''model.encoder.embed_tokens.weight''', '''model.decoder.embed_tokens.weight''', '''lm_head.weight''', ] ] assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], f"""no matches found for the following tf keys {extra}""" return torch_model def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Dict: """simple docstring""" a = tf.train.list_variables(snake_case_ ) a = {} a = ['''global_step'''] for name, shape in tqdm(snake_case_, desc='''converting tf checkpoint to dict''' ): a = any(pat in name for pat in ignore_name ) if skip_key: continue a = tf.train.load_variable(snake_case_, snake_case_ ) a = array return tf_weights def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_ ) -> int: """simple docstring""" a = get_tf_weights_as_numpy(snake_case_ ) a = convert_bigbird_pegasus(snake_case_, snake_case_ ) torch_model.save_pretrained(snake_case_ ) if __name__ == "__main__": UpperCamelCase__ : str = argparse.ArgumentParser() parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""") parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""") UpperCamelCase__ : int = parser.parse_args() UpperCamelCase__ : Tuple = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
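# ---------------------------------------------------------------------------
# Editor's sketch -- not part of the original script. The key-renaming loop
# above, restored to readable form and applied to one hypothetical TF variable
# name, to make the pattern substitution order visible.
def rename_key(key: str, patterns: list) -> str:
    for tf_name, hf_name in patterns:
        key = key.replace(tf_name, hf_name)
    return key

demo_patterns = [("/", "."), ("layer_", "layers."), ("kernel", "weight"), ("pegasus", "model")]
print(rename_key("pegasus/decoder/layer_0/kernel", demo_patterns))
# -> model.decoder.layers.0.weight
# ---------------------------------------------------------------------------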
# style_context_codestyle: 330
# label: 1
import copy import tempfile import unittest from transformers import MaMaaaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , ) -> int: '''simple docstring''' if attention_mask is None: SCREAMING_SNAKE_CASE__ = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: SCREAMING_SNAKE_CASE__ = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: SCREAMING_SNAKE_CASE__ = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=__lowerCamelCase ) if decoder_head_mask is None: SCREAMING_SNAKE_CASE__ = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=__lowerCamelCase ) if cross_attn_head_mask is None: SCREAMING_SNAKE_CASE__ = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=__lowerCamelCase ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class lowercase__ : def __init__( self : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int=13 , UpperCAmelCase_ : Optional[int]=7 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : List[Any]=99 , UpperCAmelCase_ : List[str]=16 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : Dict=4 , UpperCAmelCase_ : Optional[Any]="relu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Any=0.0 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : Any=20 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Dict=1 , UpperCAmelCase_ : Any=0 , ): SCREAMING_SNAKE_CASE__ = parent SCREAMING_SNAKE_CASE__ = batch_size SCREAMING_SNAKE_CASE__ = seq_length SCREAMING_SNAKE_CASE__ = is_training SCREAMING_SNAKE_CASE__ = use_labels SCREAMING_SNAKE_CASE__ = vocab_size SCREAMING_SNAKE_CASE__ = hidden_size SCREAMING_SNAKE_CASE__ = num_hidden_layers SCREAMING_SNAKE_CASE__ = num_attention_heads SCREAMING_SNAKE_CASE__ = intermediate_size SCREAMING_SNAKE_CASE__ = hidden_act SCREAMING_SNAKE_CASE__ = hidden_dropout_prob SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ = encoder_layerdrop SCREAMING_SNAKE_CASE__ = decoder_layerdrop SCREAMING_SNAKE_CASE__ = max_position_embeddings SCREAMING_SNAKE_CASE__ = eos_token_id SCREAMING_SNAKE_CASE__ = pad_token_id SCREAMING_SNAKE_CASE__ = bos_token_id def A_ ( self : Any ): SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ = self.eos_token_id # Eos Token SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token 
in between # this is because for M2M100 the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input SCREAMING_SNAKE_CASE__ = input_ids.clamp(self.pad_token_id + 1 ) SCREAMING_SNAKE_CASE__ = decoder_input_ids.clamp(self.pad_token_id + 1 ) SCREAMING_SNAKE_CASE__ = self.get_config() SCREAMING_SNAKE_CASE__ = prepare_mam_aaa_inputs_dict(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) return config, inputs_dict def A_ ( self : Dict ): return MaMaaaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , ) def A_ ( self : Optional[int] ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs() return config, inputs_dict def A_ ( self : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any ): SCREAMING_SNAKE_CASE__ = MaMaaaModel(config=UpperCAmelCase_ ).get_decoder().to(UpperCAmelCase_ ).eval() SCREAMING_SNAKE_CASE__ = inputs_dict['input_ids'] SCREAMING_SNAKE_CASE__ = inputs_dict['attention_mask'] SCREAMING_SNAKE_CASE__ = inputs_dict['head_mask'] # first forward pass SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , head_mask=UpperCAmelCase_ , use_cache=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids SCREAMING_SNAKE_CASE__ = ids_tensor((self.batch_size, 3) , config.vocab_size ) SCREAMING_SNAKE_CASE__ = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and SCREAMING_SNAKE_CASE__ = torch.cat([input_ids, next_tokens] , dim=-1 ) SCREAMING_SNAKE_CASE__ = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )['last_hidden_state'] SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ )[ 'last_hidden_state' ] # select random slice SCREAMING_SNAKE_CASE__ = ids_tensor((1,) , output_from_past.shape[-1] ).item() SCREAMING_SNAKE_CASE__ = output_from_no_past[:, -3:, random_slice_idx].detach() SCREAMING_SNAKE_CASE__ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1e-2 ) ) def A_ ( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE__ = MaMaaaModel(config=UpperCAmelCase_ ).to(UpperCAmelCase_ ).eval() SCREAMING_SNAKE_CASE__ = model(**UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = outputs.encoder_last_hidden_state SCREAMING_SNAKE_CASE__ = 
outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE__ = model.get_encoder() encoder.save_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = MaMaaaEncoder.from_pretrained(UpperCAmelCase_ ).to(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = encoder(inputs_dict['input_ids'] , attention_mask=inputs_dict['attention_mask'] )[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE__ = model.get_decoder() decoder.save_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = MaMaaaDecoder.from_pretrained(UpperCAmelCase_ ).to(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = decoder( input_ids=inputs_dict['decoder_input_ids'] , attention_mask=inputs_dict['decoder_attention_mask'] , encoder_hidden_states=UpperCAmelCase_ , encoder_attention_mask=inputs_dict['attention_mask'] , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 ) @require_torch class lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): A__ : List[str] =( ( MaMaaaModel, MaMaaaForConditionalGeneration, ) if is_torch_available() else () ) A__ : int =(MaMaaaForConditionalGeneration,) if is_torch_available() else () A__ : Union[str, Any] =( { """conversational""": MaMaaaForConditionalGeneration, """feature-extraction""": MaMaaaModel, """summarization""": MaMaaaForConditionalGeneration, """text2text-generation""": MaMaaaForConditionalGeneration, """translation""": MaMaaaForConditionalGeneration, } if is_torch_available() else {} ) A__ : Union[str, Any] =True A__ : Tuple =True A__ : Optional[int] =False A__ : Optional[int] =False def A_ ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str ): if pipeline_test_casse_name == "TranslationPipelineTests": # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`. # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer. 
return True return False def A_ ( self : Any ): SCREAMING_SNAKE_CASE__ = MaMaaaModelTester(self ) SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=UpperCAmelCase_ ) def A_ ( self : List[str] ): self.config_tester.run_common_tests() def A_ ( self : str ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ = model_class(UpperCAmelCase_ ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model_class.from_pretrained(UpperCAmelCase_ , output_loading_info=UpperCAmelCase_ ) self.assertEqual(info['missing_keys'] , [] ) def A_ ( self : str ): SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*UpperCAmelCase_ ) def A_ ( self : str ): SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*UpperCAmelCase_ ) def A_ ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration): SCREAMING_SNAKE_CASE__ = model_class(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() SCREAMING_SNAKE_CASE__ = copy.deepcopy(self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) ) if not self.is_encoder_decoder: SCREAMING_SNAKE_CASE__ = inputs['input_ids'] del inputs["input_ids"] else: SCREAMING_SNAKE_CASE__ = inputs['input_ids'] SCREAMING_SNAKE_CASE__ = inputs.get('decoder_input_ids' , UpperCAmelCase_ ) del inputs["input_ids"] inputs.pop('decoder_input_ids' , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = model.get_input_embeddings() if not self.is_encoder_decoder: SCREAMING_SNAKE_CASE__ = wte(UpperCAmelCase_ ) else: SCREAMING_SNAKE_CASE__ = wte(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = wte(UpperCAmelCase_ ) with torch.no_grad(): model(**UpperCAmelCase_ )[0] def A_ ( self : Any ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ = input_dict['input_ids'] SCREAMING_SNAKE_CASE__ = input_ids.ne(1 ).to(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = MaMaaaForConditionalGeneration(UpperCAmelCase_ ).eval().to(UpperCAmelCase_ ) if torch_device == "cuda": model.half() model.generate(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ ) model.generate(num_beams=4 , do_sample=UpperCAmelCase_ , early_stopping=UpperCAmelCase_ , num_return_sequences=3 ) def _lowercase ( UpperCamelCase_ ) -> List[Any]: '''simple docstring''' return torch.tensor(__lowerCamelCase , dtype=torch.long , device=__lowerCamelCase ) __snake_case = 1E-4 @require_torch @require_sentencepiece @require_tokenizers @slow class lowercase__ ( unittest.TestCase ): @cached_property def A_ ( self : List[str] ): return MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' ) def A_ ( self : int ): SCREAMING_SNAKE_CASE__ = MaMaaaModel.from_pretrained('facebook/m2m100_418M' ).to(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]] ) SCREAMING_SNAKE_CASE__ = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]] ) SCREAMING_SNAKE_CASE__ = prepare_mam_aaa_inputs_dict(model.config , UpperCAmelCase_ , UpperCAmelCase_ ) with torch.no_grad(): SCREAMING_SNAKE_CASE__ = model(**UpperCAmelCase_ )[0] 
SCREAMING_SNAKE_CASE__ = torch.Size((1, 11, 1024) ) self.assertEqual(output.shape , UpperCAmelCase_ ) # change to expected output here SCREAMING_SNAKE_CASE__ = torch.tensor( [[-0.7_780, -0.1_676, 0.1_038], [-6.7_556, -1.3_992, 0.0_567], [-7.5_383, -0.5_920, -0.2_779]] , device=UpperCAmelCase_ ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_ ) ) def A_ ( self : Optional[int] ): SCREAMING_SNAKE_CASE__ = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(UpperCAmelCase_ ) # change to intended input SCREAMING_SNAKE_CASE__ = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]] ) SCREAMING_SNAKE_CASE__ = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]] ) SCREAMING_SNAKE_CASE__ = prepare_mam_aaa_inputs_dict(model.config , UpperCAmelCase_ , UpperCAmelCase_ ) with torch.no_grad(): SCREAMING_SNAKE_CASE__ = model(**UpperCAmelCase_ )[0] SCREAMING_SNAKE_CASE__ = torch.Size((1, 11, model.config.vocab_size) ) self.assertEqual(output.shape , UpperCAmelCase_ ) # change to expected output here SCREAMING_SNAKE_CASE__ = torch.tensor( [[-1.0_448, -1.0_411, 3.7_992], [-3.2_191, -3.2_386, -1.3_451], [-3.6_210, -3.5_993, 0.4_925]] , device=UpperCAmelCase_ ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_ ) ) def A_ ( self : Optional[Any] ): SCREAMING_SNAKE_CASE__ = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' , src_lang='fr' , tgt_lang='en' ) SCREAMING_SNAKE_CASE__ = [ 'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement', 'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.', 'Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent' ' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de' ' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.', ] # The below article tests that we don't add any hypotheses outside of the top n_beams SCREAMING_SNAKE_CASE__ = tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , return_tensors='pt' ) SCREAMING_SNAKE_CASE__ = model.generate( input_ids=dct['input_ids'].to(UpperCAmelCase_ ) , attention_mask=dct['attention_mask'].to(UpperCAmelCase_ ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id('en' ) , ) SCREAMING_SNAKE_CASE__ = [ 'The NSA case highlights the total absence of intelligence debate', 'I think there are two levels of response from the French government.', 'When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.' ' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all' ' communications in France.', ] SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode( hypotheses_batch.tolist() , clean_up_tokenization_spaces=UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) assert generated == expected_en
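# ---------------------------------------------------------------------------
# Editor's sketch -- not part of the original tests. How the attention masks
# built by prepare_mam_aaa_inputs_dict are derived from the pad token id,
# shown on toy values.
import torch

pad_token_id = 1
input_ids = torch.tensor([[5, 6, 7, pad_token_id, pad_token_id]])
attention_mask = input_ids.ne(pad_token_id)  # True on real tokens, False on padding
print(attention_mask)  # tensor([[ True,  True,  True, False, False]])
# ---------------------------------------------------------------------------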
# code_codestyle: 176
__UpperCamelCase : Optional[int] = { "Pillow": "Pillow", "accelerate": "accelerate>=0.11.0", "compel": "compel==0.1.8", "black": "black~=23.1", "datasets": "datasets", "filelock": "filelock", "flax": "flax>=0.4.1", "hf-doc-builder": "hf-doc-builder>=0.3.0", "huggingface-hub": "huggingface-hub>=0.13.2", "requests-mock": "requests-mock==1.10.0", "importlib_metadata": "importlib_metadata", "invisible-watermark": "invisible-watermark", "isort": "isort>=5.5.4", "jax": "jax>=0.2.8,!=0.3.2", "jaxlib": "jaxlib>=0.1.65", "Jinja2": "Jinja2", "k-diffusion": "k-diffusion>=0.0.12", "torchsde": "torchsde", "note_seq": "note_seq", "librosa": "librosa", "numpy": "numpy", "omegaconf": "omegaconf", "parameterized": "parameterized", "protobuf": "protobuf>=3.20.3,<4", "pytest": "pytest", "pytest-timeout": "pytest-timeout", "pytest-xdist": "pytest-xdist", "ruff": "ruff>=0.0.241", "safetensors": "safetensors", "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92", "scipy": "scipy", "onnx": "onnx", "regex": "regex!=2019.12.17", "requests": "requests", "tensorboard": "tensorboard", "torch": "torch>=1.4", "torchvision": "torchvision", "transformers": "transformers>=4.25.1", "urllib3": "urllib3<=2.0.0", }
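# ---------------------------------------------------------------------------
# Editor's sketch -- not part of the original table. Pin tables like the one
# above are typically flattened into pip requirement strings; shown here on a
# small subset so the example stays self-contained.
demo_deps = {"torch": "torch>=1.4", "transformers": "transformers>=4.25.1"}
print("\n".join(sorted(demo_deps.values())))
# torch>=1.4
# transformers>=4.25.1
# ---------------------------------------------------------------------------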
# style_context_codestyle: 228
# label: 0
import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def __init__( self : str , _A : str , _A : List[str]=13 , _A : Union[str, Any]=30 , _A : Dict=2 , _A : str=3 , _A : List[Any]=True , _A : Any=True , _A : Tuple=32 , _A : List[Any]=5 , _A : Any=4 , _A : Optional[int]=37 , _A : Tuple="gelu" , _A : Optional[int]=0.1 , _A : Optional[Any]=0.1 , _A : Dict=10 , _A : int=0.0_2 , ) -> Optional[int]: """simple docstring""" snake_case_ : Tuple = parent snake_case_ : Dict = batch_size snake_case_ : int = image_size snake_case_ : Optional[int] = patch_size snake_case_ : Optional[int] = num_channels snake_case_ : Tuple = is_training snake_case_ : Union[str, Any] = use_labels snake_case_ : Any = hidden_size snake_case_ : Optional[Any] = num_hidden_layers snake_case_ : List[str] = num_attention_heads snake_case_ : Dict = intermediate_size snake_case_ : str = hidden_act snake_case_ : Any = hidden_dropout_prob snake_case_ : int = attention_probs_dropout_prob snake_case_ : List[Any] = type_sequence_label_size snake_case_ : Tuple = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) snake_case_ : Optional[Any] = (image_size // patch_size) ** 2 snake_case_ : Optional[int] = num_patches + 1 def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any: """simple docstring""" snake_case_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ : Dict = ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , ) return config, pixel_values def UpperCAmelCase_ ( self : str , _A : Any , _A : Tuple ) -> Any: """simple docstring""" snake_case_ : str = FlaxViTModel(config=_A ) snake_case_ : Dict = model(_A ) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) snake_case_ : Any = (self.image_size, self.image_size) snake_case_ : List[str] = (self.patch_size, self.patch_size) snake_case_ : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) ) def UpperCAmelCase_ ( self : Union[str, Any] , _A : Tuple , _A : Optional[Any] ) -> str: """simple docstring""" snake_case_ : str = self.type_sequence_label_size snake_case_ : List[Any] = FlaxViTForImageClassification(config=_A ) snake_case_ : Dict = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images snake_case_ : List[Any] = 1 snake_case_ : Dict = FlaxViTForImageClassification(_A ) snake_case_ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case_ : Optional[int] = model(_A ) def 
UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" snake_case_ : Optional[int] = self.prepare_config_and_inputs() ( ( snake_case_ ) ,( snake_case_ ) , ) : Optional[int] = config_and_inputs snake_case_ : List[str] = {'pixel_values': pixel_values} return config, inputs_dict @require_flax class SCREAMING_SNAKE_CASE_ ( snake_case_ , unittest.TestCase ): __magic_name__: int = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def UpperCAmelCase_ ( self : Dict ) -> None: """simple docstring""" snake_case_ : int = FlaxViTModelTester(self ) snake_case_ : Dict = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 ) def UpperCAmelCase_ ( self : Optional[Any] ) -> str: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase_ ( self : Dict ) -> List[Any]: """simple docstring""" snake_case_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def UpperCAmelCase_ ( self : Tuple ) -> str: """simple docstring""" snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_A ) def UpperCAmelCase_ ( self : str ) -> List[str]: """simple docstring""" snake_case_ ,snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : List[str] = model_class(_A ) snake_case_ : str = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ : List[Any] = [*signature.parameters.keys()] snake_case_ : int = ['pixel_values'] self.assertListEqual(arg_names[:1] , _A ) def UpperCAmelCase_ ( self : Dict ) -> List[Any]: """simple docstring""" snake_case_ ,snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): snake_case_ : Optional[Any] = self._prepare_for_class(_A , _A ) snake_case_ : List[str] = model_class(_A ) @jax.jit def model_jitted(_A : int , **_A : Optional[Any] ): return model(pixel_values=_A , **_A ) with self.subTest('JIT Enabled' ): snake_case_ : List[Any] = model_jitted(**_A ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): snake_case_ : Any = model_jitted(**_A ).to_tuple() self.assertEqual(len(_A ) , len(_A ) ) for jitted_output, output in zip(_A , _A ): self.assertEqual(jitted_output.shape , output.shape ) @slow def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ : Optional[Any] = model_class_name.from_pretrained('google/vit-base-patch16-224' ) snake_case_ : Optional[Any] = model(np.ones((1, 3, 224, 224) ) ) self.assertIsNotNone(_A )
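# ---------------------------------------------------------------------------
# Editor's sketch -- not part of the original tests. The jax.jit pattern that
# the JIT-compilation test above exercises, reduced to a self-contained
# function with toy data.
import jax
import jax.numpy as jnp

@jax.jit
def scaled_sum(x):
    return jnp.sum(x) * 0.5

x = jnp.ones((2, 3))
print(scaled_sum(x))      # compiled path
with jax.disable_jit():
    print(scaled_sum(x))  # eager path, handy for debugging
# ---------------------------------------------------------------------------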
# code_codestyle: 88
_SCREAMING_SNAKE_CASE = { """Pillow""": """Pillow""", """accelerate""": """accelerate>=0.11.0""", """compel""": """compel==0.1.8""", """black""": """black~=23.1""", """datasets""": """datasets""", """filelock""": """filelock""", """flax""": """flax>=0.4.1""", """hf-doc-builder""": """hf-doc-builder>=0.3.0""", """huggingface-hub""": """huggingface-hub>=0.13.2""", """requests-mock""": """requests-mock==1.10.0""", """importlib_metadata""": """importlib_metadata""", """invisible-watermark""": """invisible-watermark""", """isort""": """isort>=5.5.4""", """jax""": """jax>=0.2.8,!=0.3.2""", """jaxlib""": """jaxlib>=0.1.65""", """Jinja2""": """Jinja2""", """k-diffusion""": """k-diffusion>=0.0.12""", """torchsde""": """torchsde""", """note_seq""": """note_seq""", """librosa""": """librosa""", """numpy""": """numpy""", """omegaconf""": """omegaconf""", """parameterized""": """parameterized""", """protobuf""": """protobuf>=3.20.3,<4""", """pytest""": """pytest""", """pytest-timeout""": """pytest-timeout""", """pytest-xdist""": """pytest-xdist""", """ruff""": """ruff>=0.0.241""", """safetensors""": """safetensors""", """sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""", """scipy""": """scipy""", """onnx""": """onnx""", """regex""": """regex!=2019.12.17""", """requests""": """requests""", """tensorboard""": """tensorboard""", """torch""": """torch>=1.4""", """torchvision""": """torchvision""", """transformers""": """transformers>=4.25.1""", """urllib3""": """urllib3<=2.0.0""", }
# style_context_codestyle: 88
# label: 1
from __future__ import annotations


class Node:
    def __init__(self, data=None):
        self.data = data
        self.next: Node | None = None

    def __repr__(self) -> str:
        string_rep = []
        temp: Node | None = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list: list) -> Node:
    """Create a linked list from the given elements and return its head."""
    if not elements_list:
        raise Exception("The Elements List is empty")

    head = current = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node | None) -> None:
    """Recursively print the list's elements in reverse order."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main() -> None:
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
# code_codestyle: 51
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
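# ---------------------------------------------------------------------------
# Editor's usage sketch -- not part of the original file. Driving the processor
# end to end; the checkpoint name is the public Chinese-CLIP checkpoint and the
# blank stand-in image is an illustrative assumption.
from PIL import Image
from transformers import ChineseCLIPProcessor

processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
image = Image.new("RGB", (224, 224))  # stand-in image
inputs = processor(text=["一张猫的照片"], images=image, return_tensors="pt")
print(list(inputs.keys()))  # input_ids, token_type_ids, attention_mask, pixel_values
# ---------------------------------------------------------------------------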
# style_context_codestyle: 131
# label: 0
'''simple docstring''' import numpy as np from scipy.spatial.distance import cdist from sklearn.metrics import fa_score import datasets _snake_case = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n' _snake_case = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n' _snake_case = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n' def _A ( snake_case , snake_case ) -> int: return float((preds == labels).mean() ) def _A ( snake_case , snake_case ) -> Union[str, Any]: _lowercase : Any = simple_accuracy(snake_case , snake_case ) _lowercase : List[Any] = float(fa_score(y_true=snake_case , y_pred=snake_case ) ) return { "accuracy": acc, "f1": fa, } def _A ( snake_case , snake_case ) -> List[str]: _lowercase : Any = np.array(snake_case ) _lowercase : Any = np.array(snake_case ) _lowercase : str = en_sentvecs.shape[0] # mean centering _lowercase : List[Any] = en_sentvecs - np.mean(snake_case , axis=0 ) _lowercase : Tuple = in_sentvecs - np.mean(snake_case , axis=0 ) _lowercase : Any = cdist(snake_case , snake_case , "cosine" ) _lowercase : Any = np.array(range(snake_case ) ) _lowercase : Dict = sim.argsort(axis=1 )[:, :10] _lowercase : List[str] = np.any(preds == actual[:, None] , axis=1 ) return float(matches.mean() ) @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a__ ( datasets.Metric ): def _lowerCamelCase ( self ): """simple docstring""" if self.config_name not in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", "wiki-ner", ]: raise KeyError( "You should supply a 
configuration name selected in " "[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", " "\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", " "\"wiki-ner\"]" ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("int64" ) if self.config_name != "cvit-mkb-clsr" else datasets.Sequence(datasets.Value("float32" ) ), "references": datasets.Value("int64" ) if self.config_name != "cvit-mkb-clsr" else datasets.Sequence(datasets.Value("float32" ) ), } ) , codebase_urls=[] , reference_urls=[] , format="numpy" if self.config_name != "cvit-mkb-clsr" else None , ) def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase ): """simple docstring""" if self.config_name == "cvit-mkb-clsr": return {"precision@10": precision_at_aa(_UpperCamelCase , _UpperCamelCase )} elif self.config_name in ["wiki-ner"]: return acc_and_fa(_UpperCamelCase , _UpperCamelCase ) elif self.config_name in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md", ]: return {"accuracy": simple_accuracy(_UpperCamelCase , _UpperCamelCase )} else: raise KeyError( "You should supply a configuration name selected in " "[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", " "\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", " "\"wiki-ner\"]" )
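# ---------------------------------------------------------------------------
# Editor's sketch -- not part of the original metric. The mean-centering plus
# cosine-retrieval step behind precision@10, shown on toy 2-D sentence vectors.
import numpy as np
from scipy.spatial.distance import cdist

en = np.array([[1.0, 0.0], [0.0, 1.0]])
ind = np.array([[0.9, 0.1], [0.1, 0.9]])
sim = cdist(en - en.mean(axis=0), ind - ind.mean(axis=0), "cosine")
preds = sim.argsort(axis=1)[:, :10]  # indices of the 10 nearest candidates
print(np.any(preds == np.arange(len(en))[:, None], axis=1).mean())  # 1.0
# ---------------------------------------------------------------------------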
# code_codestyle: 369
def nand_gate(input_1: int, input_2: int) -> int:
    """Return the NAND of two binary inputs: 0 only when both inputs are 1."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    test_nand_gate()
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
# style_context_codestyle: 199
# label: 0
"""simple docstring""" import importlib import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Union import torch from ..utils import BaseOutput __SCREAMING_SNAKE_CASE ='scheduler_config.json' class UpperCamelCase ( lowerCamelCase__ ): lowercase = 1 lowercase = 2 lowercase = 3 lowercase = 4 lowercase = 5 lowercase = 6 lowercase = 7 lowercase = 8 lowercase = 9 lowercase = 1_0 lowercase = 1_1 lowercase = 1_2 lowercase = 1_3 lowercase = 1_4 @dataclass class UpperCamelCase ( lowerCamelCase__ ): lowercase = 4_2 class UpperCamelCase : lowercase = SCHEDULER_CONFIG_NAME lowercase = [] lowercase = True @classmethod def _UpperCAmelCase ( cls ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase=False ,**__UpperCamelCase ,) -> List[Any]: '''simple docstring''' lowercase_ : Tuple = cls.load_config( pretrained_model_name_or_path=__lowerCamelCase ,subfolder=__lowerCamelCase ,return_unused_kwargs=__lowerCamelCase ,return_commit_hash=__lowerCamelCase ,**__lowerCamelCase ,) return cls.from_config(__lowerCamelCase ,return_unused_kwargs=__lowerCamelCase ,**__lowerCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = False ,**__UpperCamelCase ) -> Optional[int]: '''simple docstring''' self.save_config(save_directory=__lowerCamelCase ,push_to_hub=__lowerCamelCase ,**__lowerCamelCase ) @property def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' return self._get_compatibles() @classmethod def _UpperCAmelCase ( cls ) -> Optional[Any]: '''simple docstring''' lowercase_ : List[Any] = list(set([cls.__name__] + cls._compatibles ) ) lowercase_ : Tuple = importlib.import_module(__name__.split('.' )[0] ) lowercase_ : Union[str, Any] = [ getattr(__lowerCamelCase ,__lowerCamelCase ) for c in compatible_classes_str if hasattr(__lowerCamelCase ,__lowerCamelCase ) ] return compatible_classes
# code_codestyle: 213
import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class lowerCamelCase__ ( lowerCamelCase__): '''simple docstring''' snake_case_ =(DPMSolverSDEScheduler,) snake_case_ =10 def lowerCAmelCase__ (self ,**__lowerCamelCase ) -> List[str]: """simple docstring""" lowerCAmelCase__ : List[str] = { '''num_train_timesteps''': 11_00, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''noise_sampler_seed''': 0, } config.update(**__lowerCamelCase ) return config def lowerCAmelCase__ (self ) -> int: """simple docstring""" for timesteps in [10, 50, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=__lowerCamelCase ) def lowerCAmelCase__ (self ) -> Optional[Any]: """simple docstring""" for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] ,[0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=__lowerCamelCase ,beta_end=__lowerCamelCase ) def lowerCAmelCase__ (self ) -> Union[str, Any]: """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=__lowerCamelCase ) def lowerCAmelCase__ (self ) -> Optional[Any]: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__lowerCamelCase ) def lowerCAmelCase__ (self ) -> int: """simple docstring""" lowerCAmelCase__ : List[str] = self.scheduler_classes[0] lowerCAmelCase__ : str = self.get_scheduler_config() lowerCAmelCase__ : Optional[Any] = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps ) lowerCAmelCase__ : Union[str, Any] = self.dummy_model() lowerCAmelCase__ : Tuple = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCAmelCase__ : Union[str, Any] = sample.to(__lowerCamelCase ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase__ : Dict = scheduler.scale_model_input(__lowerCamelCase ,__lowerCamelCase ) lowerCAmelCase__ : Any = model(__lowerCamelCase ,__lowerCamelCase ) lowerCAmelCase__ : List[Any] = scheduler.step(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) lowerCAmelCase__ : Optional[int] = output.prev_sample lowerCAmelCase__ : List[Any] = torch.sum(torch.abs(__lowerCamelCase ) ) lowerCAmelCase__ : Dict = torch.mean(torch.abs(__lowerCamelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1e-2 assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1e-2 assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1e-3 else: assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2 assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3 def lowerCAmelCase__ (self ) -> Union[str, Any]: """simple docstring""" lowerCAmelCase__ : Dict = self.scheduler_classes[0] lowerCAmelCase__ : Any = self.get_scheduler_config(prediction_type='''v_prediction''' ) lowerCAmelCase__ : List[Any] = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps ) lowerCAmelCase__ : Optional[int] = self.dummy_model() lowerCAmelCase__ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCAmelCase__ : Tuple = sample.to(__lowerCamelCase ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase__ : Optional[Any] = scheduler.scale_model_input(__lowerCamelCase ,__lowerCamelCase ) lowerCAmelCase__ : 
Optional[Any] = model(__lowerCamelCase ,__lowerCamelCase ) lowerCAmelCase__ : Optional[int] = scheduler.step(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) lowerCAmelCase__ : Union[str, Any] = output.prev_sample lowerCAmelCase__ : Any = torch.sum(torch.abs(__lowerCamelCase ) ) lowerCAmelCase__ : Union[str, Any] = torch.mean(torch.abs(__lowerCamelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1e-2 assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1e-2 assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1e-3 else: assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1e-2 assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1e-3 def lowerCAmelCase__ (self ) -> List[Any]: """simple docstring""" lowerCAmelCase__ : Any = self.scheduler_classes[0] lowerCAmelCase__ : Tuple = self.get_scheduler_config() lowerCAmelCase__ : str = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps ,device=__lowerCamelCase ) lowerCAmelCase__ : Optional[Any] = self.dummy_model() lowerCAmelCase__ : List[Any] = self.dummy_sample_deter.to(__lowerCamelCase ) * scheduler.init_noise_sigma for t in scheduler.timesteps: lowerCAmelCase__ : List[Any] = scheduler.scale_model_input(__lowerCamelCase ,__lowerCamelCase ) lowerCAmelCase__ : Any = model(__lowerCamelCase ,__lowerCamelCase ) lowerCAmelCase__ : List[Any] = scheduler.step(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) lowerCAmelCase__ : List[Any] = output.prev_sample lowerCAmelCase__ : List[str] = torch.sum(torch.abs(__lowerCamelCase ) ) lowerCAmelCase__ : Dict = torch.mean(torch.abs(__lowerCamelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1e-2 assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1e-2 assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1e-3 else: assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2 assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3 def lowerCAmelCase__ (self ) -> int: """simple docstring""" lowerCAmelCase__ : str = self.scheduler_classes[0] lowerCAmelCase__ : List[Any] = self.get_scheduler_config() lowerCAmelCase__ : Union[str, Any] = scheduler_class(**__lowerCamelCase ,use_karras_sigmas=__lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps ,device=__lowerCamelCase ) lowerCAmelCase__ : str = self.dummy_model() lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter.to(__lowerCamelCase ) * scheduler.init_noise_sigma lowerCAmelCase__ : Union[str, Any] = sample.to(__lowerCamelCase ) for t in scheduler.timesteps: lowerCAmelCase__ : List[Any] = scheduler.scale_model_input(__lowerCamelCase ,__lowerCamelCase ) lowerCAmelCase__ : str = model(__lowerCamelCase ,__lowerCamelCase ) lowerCAmelCase__ : Tuple = scheduler.step(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) lowerCAmelCase__ : str = output.prev_sample lowerCAmelCase__ : Tuple = torch.sum(torch.abs(__lowerCamelCase ) ) lowerCAmelCase__ : List[Any] = torch.mean(torch.abs(__lowerCamelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1e-2 assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1e-2 assert abs(result_mean.item() - 
0.2_3003_8727_3098_1811 ) < 1e-2 else: assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1e-2 assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
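# ---------------------------------------------------------------------------
# Editor's sketch -- not part of the original tests. Outside the test suite
# this scheduler is usually swapped into a pipeline via from_config; the
# checkpoint name is an illustrative assumption.
from diffusers import DiffusionPipeline, DPMSolverSDEScheduler

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe.scheduler = DPMSolverSDEScheduler.from_config(pipe.scheduler.config)
# ---------------------------------------------------------------------------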
# style_context_codestyle: 129
# label: 0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
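# ---------------------------------------------------------------------------
# Editor's usage sketch -- not part of the original file. Instantiating the
# config above and inspecting the character-hashing hyper-parameters; the
# override value is illustrative.
config = CanineConfig(num_hash_buckets=8192)
print(config.num_hash_functions, config.num_hash_buckets, config.downsampling_rate)
# ---------------------------------------------------------------------------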
# code_codestyle: 367
'''simple docstring'''

import tempfile

import torch

from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    DPMSolverSinglestepScheduler,
    UniPCMultistepScheduler,
)

from .test_schedulers import SchedulerCommonTest


class A ( __snake_case ):
    __magic_name__ = (UniPCMultistepScheduler,)
    __magic_name__ = (('''num_inference_steps''', 25),)

    def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> List[str]:
        """simple docstring"""
        A : str = {
            '''num_train_timesteps''': 1000,
            '''beta_start''': 0.0_001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''solver_order''': 2,
            '''solver_type''': '''bh2''',
        }

        config.update(**SCREAMING_SNAKE_CASE )
        return config

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=0 , **SCREAMING_SNAKE_CASE ) -> Any:
        """simple docstring"""
        A : List[Any] = dict(self.forward_default_kwargs )
        A : Union[str, Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE )
        A : Optional[Any] = self.dummy_sample
        A : int = 0.1 * sample
        A : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            A : Optional[Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
            A : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE )
            scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
            # copy over dummy past residuals
            A : List[Any] = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(SCREAMING_SNAKE_CASE )
                A : List[Any] = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE )
                new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
                # copy over dummy past residuals
                A : Dict = dummy_past_residuals[: new_scheduler.config.solver_order]

            A, A : Tuple = sample, sample
            for t in range(SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ):
                A : Any = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
                A : Optional[Any] = new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample

            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=0 , **SCREAMING_SNAKE_CASE ) -> Tuple:
        """simple docstring"""
        A : Optional[Any] = dict(self.forward_default_kwargs )
        A : Tuple = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE )
        A : List[Any] = self.dummy_sample
        A : int = 0.1 * sample
        A : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            A : Optional[int] = self.get_scheduler_config()
            A : Any = scheduler_class(**SCREAMING_SNAKE_CASE )
            scheduler.set_timesteps(SCREAMING_SNAKE_CASE )

            # copy over dummy past residuals (must be after setting timesteps)
            A : int = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(SCREAMING_SNAKE_CASE )
                A : int = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE )

                # copy over dummy past residual (must be after setting timesteps)
                A : Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]

            A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
            A : List[Any] = new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample

            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> Optional[int]:
        """simple docstring"""
        if scheduler is None:
            A : Dict = self.scheduler_classes[0]
            A : Union[str, Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
            A : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE )

        A : Tuple = self.scheduler_classes[0]
        A : Union[str, Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
        A : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE )

        A : int = 10
        A : Tuple = self.dummy_model()
        A : Any = self.dummy_sample_deter
        scheduler.set_timesteps(SCREAMING_SNAKE_CASE )

        for i, t in enumerate(scheduler.timesteps ):
            A : int = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
            A : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample

        return sample

    def __lowerCAmelCase ( self ) -> Optional[int]:
        """simple docstring"""
        A : Tuple = dict(self.forward_default_kwargs )
        A : List[Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE )

        for scheduler_class in self.scheduler_classes:
            A : Dict = self.get_scheduler_config()
            A : Dict = scheduler_class(**SCREAMING_SNAKE_CASE )

            A : Optional[Any] = self.dummy_sample
            A : Optional[int] = 0.1 * sample

            if num_inference_steps is not None and hasattr(SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
                scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
            elif num_inference_steps is not None and not hasattr(SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
                A : Tuple = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            A : Dict = [residual + 0.2, residual + 0.15, residual + 0.10]
            A : List[str] = dummy_past_residuals[: scheduler.config.solver_order]

            A : List[Any] = scheduler.timesteps[5]
            A : Dict = scheduler.timesteps[6]

            A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
            A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample

            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )

    def __lowerCAmelCase ( self ) -> Tuple:
        """simple docstring"""
        A : Union[str, Any] = UniPCMultistepScheduler(**self.get_scheduler_config() )

        A : List[Any] = self.full_loop(scheduler=SCREAMING_SNAKE_CASE )
        A : List[str] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )

        assert abs(result_mean.item() - 0.2_464 ) < 1e-3

        A : Dict = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        A : Optional[int] = DEISMultistepScheduler.from_config(scheduler.config )
        A : List[Any] = DPMSolverMultistepScheduler.from_config(scheduler.config )
        A : List[Any] = UniPCMultistepScheduler.from_config(scheduler.config )

        A : Optional[Any] = self.full_loop(scheduler=SCREAMING_SNAKE_CASE )
        A : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )

        assert abs(result_mean.item() - 0.2_464 ) < 1e-3

    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        """simple docstring"""
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self ) -> List[str]:
        """simple docstring"""
        self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE )
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=SCREAMING_SNAKE_CASE ,
                            prediction_type=SCREAMING_SNAKE_CASE ,
                            sample_max_value=SCREAMING_SNAKE_CASE ,
                            solver_order=SCREAMING_SNAKE_CASE ,
                            solver_type=SCREAMING_SNAKE_CASE ,
                        )

    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        """simple docstring"""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self ) -> List[Any]:
        """simple docstring"""
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=SCREAMING_SNAKE_CASE ,
                        solver_type=SCREAMING_SNAKE_CASE ,
                        prediction_type=SCREAMING_SNAKE_CASE ,
                    )
                    A : Dict = self.full_loop(
                        solver_order=SCREAMING_SNAKE_CASE ,
                        solver_type=SCREAMING_SNAKE_CASE ,
                        prediction_type=SCREAMING_SNAKE_CASE ,
                    )
                    assert not torch.isnan(SCREAMING_SNAKE_CASE ).any(), "Samples have nan numbers"

    def __lowerCAmelCase ( self ) -> Tuple:
        """simple docstring"""
        self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE )
        self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self ) -> List[str]:
        """simple docstring"""
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE , time_step=0 )

    def __lowerCAmelCase ( self ) -> List[str]:
        """simple docstring"""
        A : int = self.full_loop()
        A : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )

        assert abs(result_mean.item() - 0.2_464 ) < 1e-3

    def __lowerCAmelCase ( self ) -> List[str]:
        """simple docstring"""
        A : List[Any] = self.full_loop(prediction_type='''v_prediction''' )
        A : Any = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )

        assert abs(result_mean.item() - 0.1_014 ) < 1e-3

    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        """simple docstring"""
        A : Dict = self.scheduler_classes[0]
        A : List[Any] = self.get_scheduler_config(thresholding=SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0 )
        A : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE )

        A : Tuple = 10
        A : Union[str, Any] = self.dummy_model()
        A : Dict = self.dummy_sample_deter.half()
        scheduler.set_timesteps(SCREAMING_SNAKE_CASE )

        for i, t in enumerate(scheduler.timesteps ):
            A : Dict = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
            A : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample

        assert sample.dtype == torch.floataa

    def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> str:
        """simple docstring"""
        for scheduler_class in self.scheduler_classes:
            A : Dict = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
            A : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE )
            scheduler.set_timesteps(scheduler.config.num_train_timesteps )
            assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
311
0
'''simple docstring'''

from __future__ import annotations

import unittest

from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFRoFormerForCausalLM,
        TFRoFormerForMaskedLM,
        TFRoFormerForMultipleChoice,
        TFRoFormerForQuestionAnswering,
        TFRoFormerForSequenceClassification,
        TFRoFormerForTokenClassification,
        TFRoFormerModel,
    )
    from transformers.models.roformer.modeling_tf_roformer import (
        TFRoFormerSelfAttention,
        TFRoFormerSinusoidalPositionalEmbedding,
    )


class _UpperCAmelCase :
    """simple docstring"""

    def __init__( self : Tuple , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple=13 , __UpperCAmelCase : Optional[int]=7 , __UpperCAmelCase : int=True , __UpperCAmelCase : str=True , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : str=True , __UpperCAmelCase : List[str]=99 , __UpperCAmelCase : List[str]=32 , __UpperCAmelCase : Union[str, Any]=2 , __UpperCAmelCase : List[str]=4 , __UpperCAmelCase : Optional[Any]=37 , __UpperCAmelCase : Any="gelu" , __UpperCAmelCase : Optional[Any]=0.1 , __UpperCAmelCase : int=0.1 , __UpperCAmelCase : Dict=512 , __UpperCAmelCase : List[Any]=16 , __UpperCAmelCase : List[str]=2 , __UpperCAmelCase : Optional[Any]=0.02 , __UpperCAmelCase : int=3 , __UpperCAmelCase : Dict=4 , __UpperCAmelCase : str=None , ):
        '''simple docstring'''
        _A = parent
        _A = 13
        _A = 7
        _A = True
        _A = True
        _A = True
        _A = True
        _A = 99
        _A = 32
        _A = 2
        _A = 4
        _A = 37
        _A = "gelu"
        _A = 0.1
        _A = 0.1
        _A = 512
        _A = 16
        _A = 2
        _A = 0.02
        _A = 3
        _A = 4
        _A = None

    def lowerCAmelCase ( self : Dict ):
        '''simple docstring'''
        _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        _A = None
        if self.use_input_mask:
            _A = random_attention_mask([self.batch_size, self.seq_length] )

        _A = None
        if self.use_token_type_ids:
            _A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        _A = None
        _A = None
        _A = None
        if self.use_labels:
            _A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            _A = ids_tensor([self.batch_size] , self.num_choices )

        _A = RoFormerConfig(
            vocab_size=self.vocab_size ,
            hidden_size=self.hidden_size ,
            num_hidden_layers=self.num_hidden_layers ,
            num_attention_heads=self.num_attention_heads ,
            intermediate_size=self.intermediate_size ,
            hidden_act=self.hidden_act ,
            hidden_dropout_prob=self.hidden_dropout_prob ,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob ,
            max_position_embeddings=self.max_position_embeddings ,
            type_vocab_size=self.type_vocab_size ,
            initializer_range=self.initializer_range ,
            return_dict=__UpperCAmelCase ,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Dict , __UpperCAmelCase : Any , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] ):
        '''simple docstring'''
        _A = TFRoFormerModel(config=__UpperCAmelCase )

        _A = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}

        _A = [input_ids, input_mask]
        _A = model(__UpperCAmelCase )

        _A = model(__UpperCAmelCase )

        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCAmelCase ( self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any , __UpperCAmelCase : List[Any] ):
        '''simple docstring'''
        _A = True
        _A = TFRoFormerForCausalLM(config=__UpperCAmelCase )
        _A = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        _A = model(__UpperCAmelCase )["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )

    def lowerCAmelCase ( self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str ):
        '''simple docstring'''
        _A = TFRoFormerForMaskedLM(config=__UpperCAmelCase )
        _A = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        _A = model(__UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Union[str, Any] ):
        '''simple docstring'''
        _A = self.num_labels
        _A = TFRoFormerForSequenceClassification(config=__UpperCAmelCase )
        _A = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        _A = model(__UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] ):
        '''simple docstring'''
        _A = self.num_choices
        _A = TFRoFormerForMultipleChoice(config=__UpperCAmelCase )
        _A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        _A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        _A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        _A = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        _A = model(__UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def lowerCAmelCase ( self : Dict , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] ):
        '''simple docstring'''
        _A = self.num_labels
        _A = TFRoFormerForTokenClassification(config=__UpperCAmelCase )
        _A = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        _A = model(__UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : int , __UpperCAmelCase : int ):
        '''simple docstring'''
        _A = TFRoFormerForQuestionAnswering(config=__UpperCAmelCase )
        _A = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }

        _A = model(__UpperCAmelCase )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def lowerCAmelCase ( self : Union[str, Any] ):
        '''simple docstring'''
        _A = self.prepare_config_and_inputs()
        (
            ( _A ) ,
            ( _A ) ,
            ( _A ) ,
            ( _A ) ,
            ( _A ) ,
            ( _A ) ,
            ( _A ) ,
        ) = config_and_inputs
        _A = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
    """simple docstring"""

    snake_case = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    snake_case = (
        {
            '''feature-extraction''': TFRoFormerModel,
            '''fill-mask''': TFRoFormerForMaskedLM,
            '''question-answering''': TFRoFormerForQuestionAnswering,
            '''text-classification''': TFRoFormerForSequenceClassification,
            '''text-generation''': TFRoFormerForCausalLM,
            '''token-classification''': TFRoFormerForTokenClassification,
            '''zero-shot''': TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )

    snake_case = False
    snake_case = False

    def lowerCAmelCase ( self : int , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int] ):
        '''simple docstring'''
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False

    def lowerCAmelCase ( self : Optional[int] ):
        '''simple docstring'''
        _A = TFRoFormerModelTester(self )
        _A = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 )

    def lowerCAmelCase ( self : Optional[int] ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def lowerCAmelCase ( self : Optional[int] ):
        '''simple docstring'''
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__UpperCAmelCase )

    def lowerCAmelCase ( self : Any ):
        '''simple docstring'''
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )

    def lowerCAmelCase ( self : Union[str, Any] ):
        '''simple docstring'''
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*__UpperCAmelCase )

    def lowerCAmelCase ( self : Optional[Any] ):
        '''simple docstring'''
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase )

    def lowerCAmelCase ( self : List[Any] ):
        '''simple docstring'''
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase )

    def lowerCAmelCase ( self : str ):
        '''simple docstring'''
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase )

    def lowerCAmelCase ( self : Any ):
        '''simple docstring'''
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase )

    @slow
    def lowerCAmelCase ( self : Dict ):
        '''simple docstring'''
        _A = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base" )
        self.assertIsNotNone(__UpperCAmelCase )


@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
    """simple docstring"""

    @slow
    def lowerCAmelCase ( self : Optional[int] ):
        '''simple docstring'''
        _A = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" )
        _A = tf.constant([[0, 1, 2, 3, 4, 5]] )
        _A = model(__UpperCAmelCase )[0]

        # TODO Replace vocab size
        _A = 50000

        _A = [1, 6, vocab_size]
        self.assertEqual(output.shape , __UpperCAmelCase )

        print(output[:, :3, :3] )

        # TODO Replace values below with what was printed above.
        _A = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 )


@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
    """simple docstring"""

    snake_case = 1E-4

    def lowerCAmelCase ( self : List[str] ):
        '''simple docstring'''
        _A = tf.constant([[4, 10]] )
        _A = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )

        _A = emba(input_ids.shape )
        _A = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )

        tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , atol=self.tolerance )

    def lowerCAmelCase ( self : Tuple ):
        '''simple docstring'''
        _A = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ] )
        _A = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
        emba([2, 16, 512] )
        _A = emba.weight[:3, :5]

        tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , atol=self.tolerance )


@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
    """simple docstring"""

    snake_case = 1E-4

    def lowerCAmelCase ( self : str ):
        '''simple docstring'''
        _A = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100

        _A = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100

        _A = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
        _A = embed_positions([2, 16, 768] )[None, None, :, :]

        _A , _A = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )

        _A = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ] )
        _A = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ] )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __UpperCAmelCase , atol=self.tolerance )
        tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __UpperCAmelCase , atol=self.tolerance )
79
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


lowerCAmelCase__ = {
    """configuration_groupvit""": [
        """GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """GroupViTConfig""",
        """GroupViTOnnxConfig""",
        """GroupViTTextConfig""",
        """GroupViTVisionConfig""",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase__ = [
        """GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """GroupViTModel""",
        """GroupViTPreTrainedModel""",
        """GroupViTTextModel""",
        """GroupViTVisionModel""",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase__ = [
        """TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFGroupViTModel""",
        """TFGroupViTPreTrainedModel""",
        """TFGroupViTTextModel""",
        """TFGroupViTVisionModel""",
    ]

if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
68
0
def __lowerCamelCase (UpperCAmelCase__ : int = 1_0_0 ):
    SCREAMING_SNAKE_CASE = (n * (n + 1) // 2) ** 2
    SCREAMING_SNAKE_CASE = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares


if __name__ == "__main__":
    print(f"""{solution() = }""")
355
import inspect
import unittest

from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileNetVaForImageClassification, MobileNetVaModel
    from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileNetVaImageProcessor


class lowercase ( a ):
    def __snake_case( self : Optional[int] ) -> str:
        '''simple docstring'''
        SCREAMING_SNAKE_CASE = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(_UpperCamelCase , "tf_padding" ) )
        self.parent.assertTrue(hasattr(_UpperCamelCase , "depth_multiplier" ) )


class lowercase :
    def __init__( self : Tuple , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any=13 , _UpperCamelCase : Any=3 , _UpperCamelCase : Union[str, Any]=32 , _UpperCamelCase : Optional[Any]=0.2_5 , _UpperCamelCase : int=8 , _UpperCamelCase : str=True , _UpperCamelCase : Any=1_024 , _UpperCamelCase : Tuple=32 , _UpperCamelCase : List[str]="relu6" , _UpperCamelCase : Tuple=0.1 , _UpperCamelCase : List[str]=0.0_2 , _UpperCamelCase : int=True , _UpperCamelCase : int=True , _UpperCamelCase : Optional[Any]=10 , _UpperCamelCase : List[str]=None , ) -> Any:
        '''simple docstring'''
        SCREAMING_SNAKE_CASE = parent
        SCREAMING_SNAKE_CASE = batch_size
        SCREAMING_SNAKE_CASE = num_channels
        SCREAMING_SNAKE_CASE = image_size
        SCREAMING_SNAKE_CASE = depth_multiplier
        SCREAMING_SNAKE_CASE = min_depth
        SCREAMING_SNAKE_CASE = tf_padding
        SCREAMING_SNAKE_CASE = int(last_hidden_size * depth_multiplier )
        SCREAMING_SNAKE_CASE = output_stride
        SCREAMING_SNAKE_CASE = hidden_act
        SCREAMING_SNAKE_CASE = classifier_dropout_prob
        SCREAMING_SNAKE_CASE = use_labels
        SCREAMING_SNAKE_CASE = is_training
        SCREAMING_SNAKE_CASE = num_labels
        SCREAMING_SNAKE_CASE = initializer_range
        SCREAMING_SNAKE_CASE = scope

    def __snake_case( self : Dict ) -> List[str]:
        '''simple docstring'''
        SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        SCREAMING_SNAKE_CASE = None
        SCREAMING_SNAKE_CASE = None
        if self.use_labels:
            SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels )
            SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )

        SCREAMING_SNAKE_CASE = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def __snake_case( self : Optional[Any] ) -> str:
        '''simple docstring'''
        return MobileNetVaConfig(
            num_channels=self.num_channels ,
            image_size=self.image_size ,
            depth_multiplier=self.depth_multiplier ,
            min_depth=self.min_depth ,
            tf_padding=self.tf_padding ,
            hidden_act=self.hidden_act ,
            classifier_dropout_prob=self.classifier_dropout_prob ,
            initializer_range=self.initializer_range ,
        )

    def __snake_case( self : int , _UpperCamelCase : Optional[int] , _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : int ) -> List[str]:
        '''simple docstring'''
        SCREAMING_SNAKE_CASE = MobileNetVaModel(config=_UpperCamelCase )
        model.to(_UpperCamelCase )
        model.eval()
        SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
        self.parent.assertEqual(
            result.last_hidden_state.shape ,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) ,
        )

    def __snake_case( self : Dict , _UpperCamelCase : Tuple , _UpperCamelCase : Any , _UpperCamelCase : str , _UpperCamelCase : List[Any] ) -> Optional[int]:
        '''simple docstring'''
        SCREAMING_SNAKE_CASE = self.num_labels
        SCREAMING_SNAKE_CASE = MobileNetVaForImageClassification(_UpperCamelCase )
        model.to(_UpperCamelCase )
        model.eval()
        SCREAMING_SNAKE_CASE = model(_UpperCamelCase , labels=_UpperCamelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def __snake_case( self : Optional[Any] ) -> str:
        '''simple docstring'''
        SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs
        SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class lowercase ( a , a , unittest.TestCase ):
    lowercase__ : Dict = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    lowercase__ : Tuple = (
        {"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )

    lowercase__ : Union[str, Any] = False
    lowercase__ : Union[str, Any] = False
    lowercase__ : Tuple = False
    lowercase__ : List[str] = False

    def __snake_case( self : List[str] ) -> List[str]:
        '''simple docstring'''
        SCREAMING_SNAKE_CASE = MobileNetVaModelTester(self )
        SCREAMING_SNAKE_CASE = MobileNetVaConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase )

    def __snake_case( self : Union[str, Any] ) -> List[Any]:
        '''simple docstring'''
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds" )
    def __snake_case( self : Tuple ) -> Tuple:
        '''simple docstring'''
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings" )
    def __snake_case( self : Optional[int] ) -> Dict:
        '''simple docstring'''
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions" )
    def __snake_case( self : Any ) -> List[str]:
        '''simple docstring'''
        pass

    def __snake_case( self : List[Any] ) -> str:
        '''simple docstring'''
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
            SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]

            SCREAMING_SNAKE_CASE = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , _UpperCamelCase )

    def __snake_case( self : List[Any] ) -> Dict:
        '''simple docstring'''
        SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_UpperCamelCase )

    def __snake_case( self : List[str] ) -> Optional[int]:
        '''simple docstring'''

        def check_hidden_states_output(_UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : Tuple ):
            SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
            model.to(_UpperCamelCase )
            model.eval()

            with torch.no_grad():
                SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )

            SCREAMING_SNAKE_CASE = outputs.hidden_states

            SCREAMING_SNAKE_CASE = 26
            self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase )

        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE = True
            check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            SCREAMING_SNAKE_CASE = True

            check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )

    def __snake_case( self : Any ) -> Optional[int]:
        '''simple docstring'''
        SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase )

    @slow
    def __snake_case( self : int ) -> str:
        '''simple docstring'''
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE = MobileNetVaModel.from_pretrained(_UpperCamelCase )
            self.assertIsNotNone(_UpperCamelCase )


def __lowerCamelCase ():
    SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image


@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
    @cached_property
    def __snake_case( self : List[Any] ) -> Optional[int]:
        '''simple docstring'''
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224" ) if is_vision_available() else None
        )

    @slow
    def __snake_case( self : str ) -> Union[str, Any]:
        '''simple docstring'''
        SCREAMING_SNAKE_CASE = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224" ).to(_UpperCamelCase )

        SCREAMING_SNAKE_CASE = self.default_image_processor
        SCREAMING_SNAKE_CASE = prepare_img()
        SCREAMING_SNAKE_CASE = image_processor(images=_UpperCamelCase , return_tensors="pt" ).to(_UpperCamelCase )

        # forward pass
        with torch.no_grad():
            SCREAMING_SNAKE_CASE = model(**_UpperCamelCase )

        # verify the logits
        SCREAMING_SNAKE_CASE = torch.Size((1, 1_001) )
        self.assertEqual(outputs.logits.shape , _UpperCamelCase )

        SCREAMING_SNAKE_CASE = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] ).to(_UpperCamelCase )

        self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1e-4 ) )
206
0