Dataset schema (column, dtype, observed min/max):

    column                    type              min    max
    ------                    ----              ---    ---
    code                      string (length)   86     54.5k
    code_codestyle            int64             0      371
    style_context             string (length)   87     49.2k
    style_context_codestyle   int64             0      349
    label                     int64             0      1
--- Row 1 ---

code:

def prefix_function(input_string: str) -> list:
    """Compute the KMP prefix function: prefix_result[i] is the length of the
    longest proper prefix of input_string[: i + 1] that is also its suffix."""
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_string: str) -> int:
    """Return the maximum prefix-function value over the whole string."""
    return max(prefix_function(input_string))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
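A minimal usage sketch of the functions above (names are a reconstruction of the obfuscated originals; the expected values are standard KMP prefix-function results):

assert prefix_function("aabaaab") == [0, 1, 0, 1, 2, 2, 3]
assert longest_prefix("aabaaab") == 3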
code_codestyle: 219

style_context:
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
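The same checkpoint can be exercised without the tool wrapper; a minimal sketch using the generic transformers pipeline (the local image path is a placeholder):

from transformers import pipeline

# "image-to-text" wraps captioning checkpoints such as the BLIP model above.
captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
print(captioner("photo.jpg"))  # e.g. [{"generated_text": "..."}]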
style_context_codestyle: 219
label: 1

--- Row 2 ---

code:
import json import os import unittest from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase__ ( A__ , unittest.TestCase ): """simple docstring""" a = OpenAIGPTTokenizer a = OpenAIGPTTokenizerFast a = True a = False def lowercase_ ( self : Optional[int] ) -> Any: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt SCREAMING_SNAKE_CASE__ = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''w</w>''', '''r</w>''', '''t</w>''', '''lo''', '''low''', '''er</w>''', '''low</w>''', '''lowest</w>''', '''newer</w>''', '''wider</w>''', '''<unk>''', ] SCREAMING_SNAKE_CASE__ = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) SCREAMING_SNAKE_CASE__ = ['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', ''''''] SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' ) as fp: fp.write(json.dumps(__lowerCamelCase ) ) with open(self.merges_file , '''w''' ) as fp: fp.write('''\n'''.join(__lowerCamelCase ) ) def lowercase_ ( self : Optional[int] , __lowerCamelCase : int ) -> str: return "lower newer", "lower newer" def lowercase_ ( self : List[Any] ) -> Any: SCREAMING_SNAKE_CASE__ = OpenAIGPTTokenizer(self.vocab_file , self.merges_file ) SCREAMING_SNAKE_CASE__ = '''lower''' SCREAMING_SNAKE_CASE__ = ['''low''', '''er</w>'''] SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE__ = tokens + ['''<unk>'''] SCREAMING_SNAKE_CASE__ = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase ) def lowercase_ ( self : List[str] , __lowerCamelCase : Dict=15 ) -> str: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): SCREAMING_SNAKE_CASE__ = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase ) # Simple input SCREAMING_SNAKE_CASE__ = '''This is a simple input''' SCREAMING_SNAKE_CASE__ = ['''This is a simple input 1''', '''This is a simple input 2'''] SCREAMING_SNAKE_CASE__ = ('''This is a simple input''', '''This is a pair''') SCREAMING_SNAKE_CASE__ = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(__lowerCamelCase , tokenizer_r.encode , __lowerCamelCase , max_length=__lowerCamelCase , padding='''max_length''' ) # Simple input self.assertRaises(__lowerCamelCase , tokenizer_r.encode_plus , __lowerCamelCase , max_length=__lowerCamelCase , padding='''max_length''' ) # Simple input self.assertRaises( __lowerCamelCase , tokenizer_r.batch_encode_plus , __lowerCamelCase , max_length=__lowerCamelCase , padding='''max_length''' , ) # Pair input self.assertRaises(__lowerCamelCase , tokenizer_r.encode , __lowerCamelCase , max_length=__lowerCamelCase , padding='''max_length''' ) # Pair input self.assertRaises(__lowerCamelCase , tokenizer_r.encode_plus , __lowerCamelCase , max_length=__lowerCamelCase , 
padding='''max_length''' ) # Pair input self.assertRaises( __lowerCamelCase , tokenizer_r.batch_encode_plus , __lowerCamelCase , max_length=__lowerCamelCase , padding='''max_length''' , ) def lowercase_ ( self : int ) -> Tuple: pass @require_ftfy @require_spacy @require_tokenizers class UpperCAmelCase__ ( A__ ): """simple docstring""" pass
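The assertRaises calls in this row rely on GPT-style tokenizers shipping without a padding token; a minimal sketch of that failure mode (checkpoint name assumed to be the stock "openai-gpt"):

from transformers import OpenAIGPTTokenizerFast

tokenizer = OpenAIGPTTokenizerFast.from_pretrained("openai-gpt")
assert tokenizer.pad_token is None
try:
    tokenizer("This is a simple input", max_length=15, padding="max_length")
except ValueError as err:
    print(err)  # asks you to set a pad token before padding can be used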
code_codestyle: 218

style_context:
import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 _SCREAMING_SNAKE_CASE : Tuple = data_utils.TransfoXLTokenizer _SCREAMING_SNAKE_CASE : Dict = data_utils.TransfoXLCorpus _SCREAMING_SNAKE_CASE : Union[str, Any] = data_utils _SCREAMING_SNAKE_CASE : Any = data_utils def UpperCAmelCase_ ( _A , _A , _A , _A ): '''simple docstring''' if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(_A , '''rb''' ) as fp: SCREAMING_SNAKE_CASE__ = pickle.load(_A , encoding='''latin1''' ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) SCREAMING_SNAKE_CASE__ = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''pretrained_vocab_file'''] print(F'''Save vocabulary to {pytorch_vocab_dump_path}''' ) SCREAMING_SNAKE_CASE__ = corpus.vocab.__dict__ torch.save(_A , _A ) SCREAMING_SNAKE_CASE__ = corpus.__dict__ corpus_dict_no_vocab.pop('''vocab''' , _A ) SCREAMING_SNAKE_CASE__ = pytorch_dump_folder_path + '''/''' + CORPUS_NAME print(F'''Save dataset to {pytorch_dataset_dump_path}''' ) torch.save(_A , _A ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model SCREAMING_SNAKE_CASE__ = os.path.abspath(_A ) SCREAMING_SNAKE_CASE__ = os.path.abspath(_A ) print(F'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' ) # Initialise PyTorch model if transfo_xl_config_file == "": SCREAMING_SNAKE_CASE__ = TransfoXLConfig() else: SCREAMING_SNAKE_CASE__ = TransfoXLConfig.from_json_file(_A ) print(F'''Building PyTorch model from configuration: {config}''' ) SCREAMING_SNAKE_CASE__ = TransfoXLLMHeadModel(_A ) SCREAMING_SNAKE_CASE__ = load_tf_weights_in_transfo_xl(_A , _A , _A ) # Save pytorch-model SCREAMING_SNAKE_CASE__ = os.path.join(_A , _A ) SCREAMING_SNAKE_CASE__ = os.path.join(_A , _A ) print(F'''Save PyTorch model to {os.path.abspath(_A )}''' ) torch.save(model.state_dict() , _A ) print(F'''Save configuration file to {os.path.abspath(_A )}''' ) with open(_A , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser() parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the folder to store the PyTorch model or dataset/vocab.''', ) parser.add_argument( '''--tf_checkpoint_path''', default='''''', type=str, help='''An optional path to a TensorFlow checkpoint path to be converted.''', ) parser.add_argument( '''--transfo_xl_config_file''', default='''''', type=str, help=( '''An optional config json file corresponding to the pre-trained BERT model. 
\n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--transfo_xl_dataset_file''', default='''''', type=str, help='''An optional dataset file to be converted in a vocabulary.''', ) _SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
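The run of bare data_utils assignments near the top of this row is the obfuscated remains of a module-aliasing trick for loading legacy pickles; a sketch of the idiom (alias names assumed from the surrounding code):

import sys

from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils

# Old Python 2-era pickles reference classes by their original module paths;
# registering aliases in sys.modules lets pickle.load resolve them again.
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils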
style_context_codestyle: 218
label: 1

--- Row 3 ---

code:
"""simple docstring""" import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class __A ( unittest.TestCase ): '''simple docstring''' def UpperCAmelCase ( self : List[Any] ) -> Tuple: """simple docstring""" lowercase__ : List[Any] = inspect.getfile(accelerate.test_utils ) lowercase__ : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] ) lowercase__ : Tuple = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] ) lowercase__ : Any = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] ) @require_multi_gpu def UpperCAmelCase ( self : Dict ) -> Any: """simple docstring""" print(f"""Found {torch.cuda.device_count()} devices.""" ) lowercase__ : Any = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(_snake_case ,env=os.environ.copy() ) @require_multi_gpu def UpperCAmelCase ( self : str ) -> List[Any]: """simple docstring""" print(f"""Found {torch.cuda.device_count()} devices.""" ) lowercase__ : Dict = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path] print(f"""Command: {cmd}""" ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(_snake_case ,env=os.environ.copy() ) @require_multi_gpu def UpperCAmelCase ( self : Optional[int] ) -> str: """simple docstring""" lowercase__ : Any = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(_snake_case ,env=os.environ.copy() ) @require_multi_gpu def UpperCAmelCase ( self : Dict ) -> Optional[Any]: """simple docstring""" print(f"""Found {torch.cuda.device_count()} devices, using 2 devices only""" ) lowercase__ : str = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path] with patch_environment(omp_num_threads=1 ,cuda_visible_devices='''0,1''' ): execute_subprocess_async(_snake_case ,env=os.environ.copy() ) if __name__ == "__main__": lowerCAmelCase_ = Accelerator() lowerCAmelCase_ = (accelerator.state.process_index + 2, 10) lowerCAmelCase_ = torch.randint(0, 10, shape).to(accelerator.device) lowerCAmelCase_ = '' lowerCAmelCase_ = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." lowerCAmelCase_ = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." lowerCAmelCase_ = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." 
# Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
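The __main__ block in this row exercises Accelerator.pad_across_processes; a single-process sketch of the expected padding behavior (shapes chosen for illustration):

import torch
from accelerate import Accelerator

accelerator = Accelerator()
# Each process contributes a tensor whose dim 0 grows with its rank.
tensor = torch.randint(0, 10, (accelerator.process_index + 2, 10)).to(accelerator.device)
padded = accelerator.pad_across_processes(tensor)  # zero-pads dim 0 to the max across processes
assert padded.shape[0] == accelerator.num_processes + 1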
code_codestyle: 16

style_context:
"""simple docstring""" import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version lowerCAmelCase_ = version.parse(importlib_metadata.version('nltk')) if NLTK_VERSION >= version.Version('3.6.4'): from nltk import word_tokenize lowerCAmelCase_ = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n' lowerCAmelCase_ = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n' lowerCAmelCase_ = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. 
default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class __A ( datasets.Metric ): '''simple docstring''' def UpperCAmelCase ( self : Optional[int] ) -> str: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { '''predictions''': datasets.Value('''string''' ,id='''sequence''' ), '''references''': datasets.Value('''string''' ,id='''sequence''' ), } ) ,codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''] ,reference_urls=[ '''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''', '''https://en.wikipedia.org/wiki/METEOR''', ] ,) def UpperCAmelCase ( self : str ,_snake_case : Dict ) -> Dict: """simple docstring""" import nltk nltk.download('''wordnet''' ) if NLTK_VERSION >= version.Version('''3.6.5''' ): nltk.download('''punkt''' ) if NLTK_VERSION >= version.Version('''3.6.6''' ): nltk.download('''omw-1.4''' ) def UpperCAmelCase ( self : Dict ,_snake_case : Dict ,_snake_case : List[str] ,_snake_case : Tuple=0.9 ,_snake_case : Optional[int]=3 ,_snake_case : Union[str, Any]=0.5 ) -> List[str]: """simple docstring""" if NLTK_VERSION >= version.Version('''3.6.5''' ): lowercase__ : int = [ meteor_score.single_meteor_score( word_tokenize(_snake_case ) ,word_tokenize(_snake_case ) ,alpha=_snake_case ,beta=_snake_case ,gamma=_snake_case ) for ref, pred in zip(_snake_case ,_snake_case ) ] else: lowercase__ : Tuple = [ meteor_score.single_meteor_score(_snake_case ,_snake_case ,alpha=_snake_case ,beta=_snake_case ,gamma=_snake_case ) for ref, pred in zip(_snake_case ,_snake_case ) ] return {"meteor": np.mean(_snake_case )}
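Under the hood this metric defers to NLTK; a minimal direct sketch with the same α/β/γ defaults named in the docstring (requires the nltk wordnet and punkt data packages):

from nltk import word_tokenize
from nltk.translate.meteor_score import single_meteor_score

reference = word_tokenize(
    "It is a guide to action that ensures that the military will forever heed Party commands"
)
prediction = word_tokenize(
    "It is a guide to action which ensures that the military always obeys the commands of the party"
)
score = single_meteor_score(reference, prediction, alpha=0.9, beta=3, gamma=0.5)
print(round(score, 4))  # ~0.6944, per the docstring example above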
style_context_codestyle: 16
label: 1

--- Row 4 ---

code:
"""simple docstring""" from typing import Callable, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { "microsoft/xprophetnet-large-wiki100-cased": ( "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json" ), } class _SCREAMING_SNAKE_CASE( A ): SCREAMING_SNAKE_CASE_ : Optional[Any] = '''xlm-prophetnet''' SCREAMING_SNAKE_CASE_ : Any = ['''past_key_values'''] SCREAMING_SNAKE_CASE_ : Optional[Any] = { '''num_attention_heads''': '''num_encoder_attention_heads''', } def __init__( self ,SCREAMING_SNAKE_CASE__ = 0.1 ,SCREAMING_SNAKE_CASE__ = "gelu" ,SCREAMING_SNAKE_CASE__ = 3_05_22 ,SCREAMING_SNAKE_CASE__ = 10_24 ,SCREAMING_SNAKE_CASE__ = 40_96 ,SCREAMING_SNAKE_CASE__ = 12 ,SCREAMING_SNAKE_CASE__ = 16 ,SCREAMING_SNAKE_CASE__ = 40_96 ,SCREAMING_SNAKE_CASE__ = 12 ,SCREAMING_SNAKE_CASE__ = 16 ,SCREAMING_SNAKE_CASE__ = 0.1 ,SCREAMING_SNAKE_CASE__ = 0.1 ,SCREAMING_SNAKE_CASE__ = 5_12 ,SCREAMING_SNAKE_CASE__ = 0.0_2 ,SCREAMING_SNAKE_CASE__ = True ,SCREAMING_SNAKE_CASE__ = True ,SCREAMING_SNAKE_CASE__ = 0 ,SCREAMING_SNAKE_CASE__ = 2 ,SCREAMING_SNAKE_CASE__ = 32 ,SCREAMING_SNAKE_CASE__ = 1_28 ,SCREAMING_SNAKE_CASE__ = False ,SCREAMING_SNAKE_CASE__ = 0.0 ,SCREAMING_SNAKE_CASE__ = True ,SCREAMING_SNAKE_CASE__ = 0 ,SCREAMING_SNAKE_CASE__ = 1 ,SCREAMING_SNAKE_CASE__ = 2 ,**SCREAMING_SNAKE_CASE__ ,) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE :int = vocab_size __SCREAMING_SNAKE_CASE :Tuple = hidden_size __SCREAMING_SNAKE_CASE :Optional[int] = encoder_ffn_dim __SCREAMING_SNAKE_CASE :Optional[int] = num_encoder_layers __SCREAMING_SNAKE_CASE :Tuple = num_encoder_attention_heads __SCREAMING_SNAKE_CASE :List[Any] = decoder_ffn_dim __SCREAMING_SNAKE_CASE :Union[str, Any] = num_decoder_layers __SCREAMING_SNAKE_CASE :Optional[Any] = num_decoder_attention_heads __SCREAMING_SNAKE_CASE :List[str] = max_position_embeddings __SCREAMING_SNAKE_CASE :Dict = init_std # Normal(0, this parameter) __SCREAMING_SNAKE_CASE :List[Any] = activation_function # parameters for xlmprophetnet __SCREAMING_SNAKE_CASE :Tuple = ngram __SCREAMING_SNAKE_CASE :int = num_buckets __SCREAMING_SNAKE_CASE :Optional[int] = relative_max_distance __SCREAMING_SNAKE_CASE :Union[str, Any] = disable_ngram_loss __SCREAMING_SNAKE_CASE :Dict = eps # 3 Types of Dropout __SCREAMING_SNAKE_CASE :List[str] = attention_dropout __SCREAMING_SNAKE_CASE :Dict = activation_dropout __SCREAMING_SNAKE_CASE :Union[str, Any] = dropout __SCREAMING_SNAKE_CASE :int = use_cache super().__init__( pad_token_id=SCREAMING_SNAKE_CASE__ ,bos_token_id=SCREAMING_SNAKE_CASE__ ,eos_token_id=SCREAMING_SNAKE_CASE__ ,is_encoder_decoder=SCREAMING_SNAKE_CASE__ ,add_cross_attention=SCREAMING_SNAKE_CASE__ ,decoder_start_token_id=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,) @property def _UpperCamelCase ( self ) -> int: """simple docstring""" return self.num_encoder_layers + self.num_decoder_layers @num_hidden_layers.setter def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> Optional[int]: """simple docstring""" raise NotImplementedError( '''This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and''' ''' `num_decoder_layers`.''' )
code_codestyle: 363

style_context:
"""simple docstring""" import argparse import torch from transformers import ( UniSpeechSatConfig, UniSpeechSatForAudioFrameClassification, UniSpeechSatForSequenceClassification, UniSpeechSatForXVector, WavaVecaFeatureExtractor, logging, ) logging.set_verbosity_info() lowerCamelCase_ = logging.get_logger(__name__) def __lowerCamelCase ( a_ : Optional[int] , a_ : Optional[int] , a_ : Any ) -> List[Any]: __SCREAMING_SNAKE_CASE :Any = UniSpeechSatForSequenceClassification.from_pretrained(a_ , config=a_ ) __SCREAMING_SNAKE_CASE :int = downstream_dict['''projector.weight'''] __SCREAMING_SNAKE_CASE :List[Any] = downstream_dict['''projector.bias'''] __SCREAMING_SNAKE_CASE :Union[str, Any] = downstream_dict['''model.post_net.linear.weight'''] __SCREAMING_SNAKE_CASE :List[str] = downstream_dict['''model.post_net.linear.bias'''] return model def __lowerCamelCase ( a_ : Union[str, Any] , a_ : List[Any] , a_ : List[Any] ) -> Optional[int]: __SCREAMING_SNAKE_CASE :Any = UniSpeechSatForAudioFrameClassification.from_pretrained(a_ , config=a_ ) __SCREAMING_SNAKE_CASE :List[str] = downstream_dict['''model.linear.weight'''] __SCREAMING_SNAKE_CASE :Union[str, Any] = downstream_dict['''model.linear.bias'''] return model def __lowerCamelCase ( a_ : Optional[int] , a_ : Optional[Any] , a_ : int ) -> List[str]: __SCREAMING_SNAKE_CASE :List[str] = UniSpeechSatForXVector.from_pretrained(a_ , config=a_ ) __SCREAMING_SNAKE_CASE :Optional[int] = downstream_dict['''connector.weight'''] __SCREAMING_SNAKE_CASE :Tuple = downstream_dict['''connector.bias'''] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): __SCREAMING_SNAKE_CASE :str = downstream_dict[ f'''model.framelevel_feature_extractor.module.{i}.kernel.weight''' ] __SCREAMING_SNAKE_CASE :int = downstream_dict[f'''model.framelevel_feature_extractor.module.{i}.kernel.bias'''] __SCREAMING_SNAKE_CASE :Any = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight'''] __SCREAMING_SNAKE_CASE :Optional[Any] = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias'''] __SCREAMING_SNAKE_CASE :Dict = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight'''] __SCREAMING_SNAKE_CASE :Optional[int] = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias'''] __SCREAMING_SNAKE_CASE :str = downstream_dict['''objective.W'''] return model @torch.no_grad() def __lowerCamelCase ( a_ : Optional[int] , a_ : Union[str, Any] , a_ : Any , a_ : Union[str, Any] ) -> List[str]: __SCREAMING_SNAKE_CASE :str = torch.load(a_ , map_location='''cpu''' ) __SCREAMING_SNAKE_CASE :str = checkpoint['''Downstream'''] __SCREAMING_SNAKE_CASE :str = UniSpeechSatConfig.from_pretrained(a_ ) __SCREAMING_SNAKE_CASE :List[str] = WavaVecaFeatureExtractor.from_pretrained( a_ , return_attention_mask=a_ , do_normalize=a_ ) __SCREAMING_SNAKE_CASE :Optional[Any] = hf_config.architectures[0] if arch.endswith('''ForSequenceClassification''' ): __SCREAMING_SNAKE_CASE :str = convert_classification(a_ , a_ , a_ ) elif arch.endswith('''ForAudioFrameClassification''' ): __SCREAMING_SNAKE_CASE :Tuple = convert_diarization(a_ , a_ , a_ ) elif arch.endswith('''ForXVector''' ): __SCREAMING_SNAKE_CASE :List[Any] = convert_xvector(a_ , a_ , a_ ) else: raise NotImplementedError(f'''S3PRL weights conversion is not supported for {arch}''' ) if hf_config.use_weighted_layer_sum: __SCREAMING_SNAKE_CASE :Dict = checkpoint['''Featurizer''']['''weights'''] hf_feature_extractor.save_pretrained(a_ ) hf_model.save_pretrained(a_ ) if __name__ == "__main__": 
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
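A direct-call sketch mirroring the CLI tail above (the function name is reconstructed from the cleaned-up call, and all paths and the checkpoint name are placeholders):

convert_s3prl_checkpoint(
    "microsoft/unispeech-sat-base",  # base_model_name (placeholder checkpoint)
    "config.json",                   # config_path
    "s3prl_checkpoint.ckpt",         # checkpoint_path
    "./converted",                   # model_dump_path
)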
style_context_codestyle: 239
label: 0

--- Row 5 ---

code:
'''simple docstring''' import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class snake_case__ ( unittest.TestCase): @property def A ( self : Optional[int] ) -> List[Any]: torch.manual_seed(0 ) UpperCAmelCase_ : int = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def A ( self : Optional[int] ) -> Dict: UpperCAmelCase_ : Union[str, Any] = self.dummy_uncond_unet UpperCAmelCase_ : int = KarrasVeScheduler() UpperCAmelCase_ : str = KarrasVePipeline(unet=_A , scheduler=_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase_ : Tuple = torch.manual_seed(0 ) UpperCAmelCase_ : List[str] = pipe(num_inference_steps=2 , generator=_A , output_type='''numpy''' ).images UpperCAmelCase_ : List[str] = torch.manual_seed(0 ) UpperCAmelCase_ : str = pipe(num_inference_steps=2 , generator=_A , output_type='''numpy''' , return_dict=_A )[0] UpperCAmelCase_ : Any = image[0, -3:, -3:, -1] UpperCAmelCase_ : str = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCAmelCase_ : int = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class snake_case__ ( unittest.TestCase): def A ( self : List[str] ) -> str: UpperCAmelCase_ : Dict = """google/ncsnpp-celebahq-256""" UpperCAmelCase_ : Optional[int] = UNetaDModel.from_pretrained(_A ) UpperCAmelCase_ : Union[str, Any] = KarrasVeScheduler() UpperCAmelCase_ : Tuple = KarrasVePipeline(unet=_A , scheduler=_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase_ : int = torch.manual_seed(0 ) UpperCAmelCase_ : Any = pipe(num_inference_steps=20 , generator=_A , output_type='''numpy''' ).images UpperCAmelCase_ : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) UpperCAmelCase_ : Union[str, Any] = np.array([0.578, 0.5_811, 0.5_924, 0.5_809, 0.587, 0.5_886, 0.5_861, 0.5_802, 0.586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
code_codestyle: 304

style_context:
def reverse_long_words(sentence: str) -> str:
    """Reverse every word longer than four letters.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
style_context_codestyle: 287
label: 0

--- Row 6 ---

code:
"""simple docstring""" from dataclasses import dataclass, field from typing import Optional @dataclass class __snake_case : lowerCAmelCase_ = field( default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be trained."} ) lowerCAmelCase_ = field( default="./" , metadata={"help": "Save dir where model repo is cloned and models updates are saved to."} ) lowerCAmelCase_ = field( default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path of training dataset."} ) lowerCAmelCase_ = field( default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} ) lowerCAmelCase_ = field(default=2 , metadata={"help": "Batch size for training."} ) lowerCAmelCase_ = field(default=2 , metadata={"help": "Batch size for evaluation."} ) lowerCAmelCase_ = field(default=0.1 , metadata={"help": "Value of weight decay."} ) lowerCAmelCase_ = field( default=1_00_00 , metadata={"help": "Size of buffer used to shuffle streaming dataset."} ) lowerCAmelCase_ = field(default=2E-4 , metadata={"help": "Learning rate fo training."} ) lowerCAmelCase_ = field(default="cosine" , metadata={"help": "Learning rate."} ) lowerCAmelCase_ = field( default=7_50 , metadata={"help": "Number of warmup steps in the learning rate schedule."} ) lowerCAmelCase_ = field( default=16 , metadata={"help": "Number of gradient accumulation steps."} ) lowerCAmelCase_ = field( default=lowerCamelCase_ , metadata={"help": "Use gradient checkpointing to reduce memory footprint."} ) lowerCAmelCase_ = field(default=5_00_00 , metadata={"help": "Maximum number of training steps."} ) lowerCAmelCase_ = field( default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} ) lowerCAmelCase_ = field(default=10_24 , metadata={"help": "Sequence lengths used for training."} ) lowerCAmelCase_ = field(default=1 , metadata={"help": "Training seed."} ) lowerCAmelCase_ = field( default=10_24 , metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."} , ) lowerCAmelCase_ = field( default=lowerCamelCase_ , metadata={"help": "States path if the training should continue from a checkpoint folder."} ) lowerCAmelCase_ = field(default=lowerCamelCase_ , metadata={"help": "If True the data is pretokenized."} ) @dataclass class __snake_case : lowerCAmelCase_ = field( default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} ) lowerCAmelCase_ = field( default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} ) lowerCAmelCase_ = field(default=2 , metadata={"help": "Batch size used for evaluation."} ) lowerCAmelCase_ = field( default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} ) lowerCAmelCase_ = field(default=10_24 , metadata={"help": "Length of sequences to be evaluated."} ) lowerCAmelCase_ = field(default=1 , metadata={"help": "Random seed used for evaluation."} ) @dataclass class __snake_case : lowerCAmelCase_ = field( default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} ) lowerCAmelCase_ = field(default=lowerCamelCase_ , metadata={"help": "Number of workers used for code evaluation."} ) lowerCAmelCase_ = field( default=lowerCamelCase_ , metadata={"help": "The number of human-eval tasks to run. 
If not included all tasks are evaluated."} , ) lowerCAmelCase_ = field( default=lowerCamelCase_ , metadata={"help": "Sample from the language model's output distribution."} ) lowerCAmelCase_ = field(default=0.2 , metadata={"help": "Sampling temperature used for generation."} ) lowerCAmelCase_ = field(default=2_56 , metadata={"help": "Maximum number of newly generated tokens."} ) lowerCAmelCase_ = field(default=0 , metadata={"help": "Top-k parameter used for generation."} ) lowerCAmelCase_ = field(default=0.95 , metadata={"help": "Top-p parameter used for nucleus sampling."} ) lowerCAmelCase_ = field(default=10 , metadata={"help": "Number of generations to run in parallel."} ) lowerCAmelCase_ = field( default=2_00 , metadata={"help": "Number of completions to generate for each sample."} ) lowerCAmelCase_ = field(default=1 , metadata={"help": "Random seed used for evaluation."} ) lowerCAmelCase_ = field( default="eval_results.json" , metadata={"help": "Random seed used for evaluation."} ) lowerCAmelCase_ = field( default="0" , metadata={"help": "Allow `code_eval` to execute Python code on machine"} ) lowerCAmelCase_ = field( default=-1 , metadata={ "help": ( "Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive" " number corresponds to which GPU device id to run on." ) } , ) @dataclass class __snake_case : lowerCAmelCase_ = field( default=lowerCamelCase_ , metadata={ "help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available." } , ) lowerCAmelCase_ = field( default="transformersbook/codeparrot" , metadata={"help": "Folder or name of dataset to process."} ) lowerCAmelCase_ = field( default="codeparrot-clean" , metadata={"help": "Folder to save processed processed dataset."} ) lowerCAmelCase_ = field( default=10_00_00 , metadata={"help": "Number of files to save per JSON output file."} ) lowerCAmelCase_ = field(default="content" , metadata={"help": "Column containing text data to process."} ) lowerCAmelCase_ = field( default=10_00 , metadata={"help": "Maximum line length in file, otherwise file is filtered."} ) lowerCAmelCase_ = field( default=1_00 , metadata={"help": "Maximum mean line length in file, otherwise file is filtered."} ) lowerCAmelCase_ = field( default=0.25 , metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."} ) lowerCAmelCase_ = field( default=1.5 , metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."} ) lowerCAmelCase_ = field( default=0.7 , metadata={"help": "Probability for filtering config, test and uncommon files."} ) lowerCAmelCase_ = field( default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} , ) lowerCAmelCase_ = field( default=lowerCamelCase_ , metadata={"help": "If True, near-duplicate samples are removed."} ) lowerCAmelCase_ = field( default=0.85 , metadata={"help": "Jaccard threshold for near-duplicate samples."} ) @dataclass class __snake_case : lowerCAmelCase_ = field( default="gpt2" , metadata={"help": "Base tokenizer to build new tokenizer from."} ) lowerCAmelCase_ = field( default="transformersbook/codeparrot-train" , metadata={"help": "Dataset to train tokenizer on."} ) lowerCAmelCase_ = field(default="content" , metadata={"help": "Column containing text data to process."} ) lowerCAmelCase_ = field(default=20_00_00 , metadata={"help": "Number of examples to train tokenizer on."} ) lowerCAmelCase_ = field( default=3_27_68 , metadata={"help": "Number of 
examples to train the tokenizer on."} ) lowerCAmelCase_ = field(default="codeparrot" , metadata={"help": "Name of new tokenizer."} ) lowerCAmelCase_ = field(default=lowerCamelCase_ , metadata={"help": "Push saved tokenizer to the hub."} ) @dataclass class __snake_case : lowerCAmelCase_ = field( default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} ) lowerCAmelCase_ = field( default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path to the dataset to pretokenize."} ) lowerCAmelCase_ = field( default="tokenized-codeparrot-train" , metadata={"help": "Repo name of the pretokenized data."} ) lowerCAmelCase_ = field(default=lowerCamelCase_ , metadata={"help": "Number of workers used for code evaluation."} ) @dataclass class __snake_case : lowerCAmelCase_ = field( default="gpt2-large" , metadata={"help": "Configuration to use for model initialization."} ) lowerCAmelCase_ = field( default="codeparrot/codeparrot" , metadata={"help": "Tokenizer attached to model."} ) lowerCAmelCase_ = field(default="codeparrot" , metadata={"help": "Name of the created model."} ) lowerCAmelCase_ = field(default=lowerCamelCase_ , metadata={"help": "Push saved tokenizer to the hub."} )
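These @dataclass blocks are designed to be consumed by transformers' HfArgumentParser; a minimal sketch, assuming the first (obfuscated) training dataclass is named TrainConfig:

from transformers import HfArgumentParser

# TrainConfig is an assumed name standing in for the first dataclass above.
parser = HfArgumentParser(TrainConfig)
(train_args,) = parser.parse_args_into_dataclasses()
print(train_args)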
code_codestyle: 358

style_context:
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image


if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    """Convert a torch image batch in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    return numpy_to_pil(images)


def numpy_to_pil(images):
    """Convert a numpy image batch in [0, 1] to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
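A quick round-trip sketch of numpy_to_pil above:

import numpy as np

batch = np.random.rand(2, 32, 32, 3)          # two RGB images in [0, 1]
pils = numpy_to_pil(batch)
print(len(pils), pils[0].size, pils[0].mode)  # 2 (32, 32) RGB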
style_context_codestyle: 204
label: 0

--- Row 7 ---

code:
'''simple docstring''' import argparse import re import requests import torch # git clone https://github.com/salesforce/BLIP.git from models.blip import blip_decoder from models.blip_itm import blip_itm from models.blip_vqa import blip_vqa from PIL import Image from torchvision import transforms from torchvision.transforms.functional import InterpolationMode from transformers import ( BertTokenizer, BlipConfig, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, ) def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]: _a : List[str] = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg' _a : Optional[Any] = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw ).convert('RGB' ) _a : Optional[Any] = transforms.Compose( [ transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ), transforms.ToTensor(), transforms.Normalize((0.48_145_466, 0.4_578_275, 0.40_821_073) , (0.26_862_954, 0.26_130_258, 0.27_577_711) ), ] ) _a : List[str] = transform(lowerCAmelCase_ ).unsqueeze(0 ).to(lowerCAmelCase_ ) return image def __lowerCamelCase ( lowerCAmelCase_ ) -> int: if "visual_encoder" in key: _a : List[str] = re.sub('visual_encoder*' , 'vision_model.encoder' , lowerCAmelCase_ ) if "blocks" in key: _a : int = re.sub(r'blocks' , 'layers' , lowerCAmelCase_ ) if "attn" in key: _a : Optional[int] = re.sub(r'attn' , 'self_attn' , lowerCAmelCase_ ) if "norm1" in key: _a : List[str] = re.sub(r'norm1' , 'layer_norm1' , lowerCAmelCase_ ) if "norm2" in key: _a : str = re.sub(r'norm2' , 'layer_norm2' , lowerCAmelCase_ ) if "encoder.norm" in key: _a : int = re.sub(r'encoder.norm' , 'post_layernorm' , lowerCAmelCase_ ) if "encoder.patch_embed.proj" in key: _a : Union[str, Any] = re.sub(r'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , lowerCAmelCase_ ) if "encoder.pos_embed" in key: _a : Tuple = re.sub(r'encoder.pos_embed' , 'embeddings.position_embedding' , lowerCAmelCase_ ) if "encoder.cls_token" in key: _a : Tuple = re.sub(r'encoder.cls_token' , 'embeddings.class_embedding' , lowerCAmelCase_ ) if "self_attn" in key: _a : Any = re.sub(r'self_attn.proj' , 'self_attn.projection' , lowerCAmelCase_ ) return key @torch.no_grad() def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=None ) -> int: if config_path is not None: _a : Tuple = BlipConfig.from_pretrained(lowerCAmelCase_ ) else: _a : int = BlipConfig(projection_dim=512 , text_config={} , vision_config={} ) _a : Dict = BlipForConditionalGeneration(lowerCAmelCase_ ).eval() _a : Any = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth' _a : List[str] = blip_decoder(pretrained=lowerCAmelCase_ , image_size=384 , vit='base' ) _a : int = pt_model.eval() _a : Any = pt_model.state_dict() for key in modified_state_dict.copy(): _a : List[str] = modified_state_dict.pop(lowerCAmelCase_ ) _a : Optional[Any] = rename_key(lowerCAmelCase_ ) _a : Any = value hf_model.load_state_dict(lowerCAmelCase_ ) _a : Optional[int] = 384 _a : List[str] = load_demo_image(image_size=lowerCAmelCase_ , device='cpu' ) _a : Optional[Any] = BertTokenizer.from_pretrained('bert-base-uncased' ) _a : int = tokenizer(['a picture of'] ).input_ids _a : List[str] = hf_model.generate(lowerCAmelCase_ , lowerCAmelCase_ ) assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102] _a : List[str] = hf_model.generate(lowerCAmelCase_ ) assert out[0].tolist() == [30522, 1037, 
2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102] if pytorch_dump_folder_path is not None: hf_model.save_pretrained(lowerCAmelCase_ ) # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth' _a : Tuple = ( 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth' ) _a : Optional[Any] = blip_vqa(pretrained=lowerCAmelCase_ , image_size=lowerCAmelCase_ , vit='base' ) vqa_model.eval() _a : Optional[int] = vqa_model.state_dict() for key in modified_state_dict.copy(): _a : int = modified_state_dict.pop(lowerCAmelCase_ ) _a : Optional[int] = rename_key(lowerCAmelCase_ ) _a : Dict = value _a : Optional[int] = BlipForQuestionAnswering(lowerCAmelCase_ ) hf_vqa_model.load_state_dict(lowerCAmelCase_ ) _a : List[str] = ['How many dogs are in this image?'] _a : str = tokenizer(lowerCAmelCase_ , return_tensors='pt' ).input_ids _a : Dict = hf_vqa_model.generate(lowerCAmelCase_ , lowerCAmelCase_ ) print(tokenizer.decode(answer[0] ) ) assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]" if pytorch_dump_folder_path is not None: hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' ) _a : int = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth' _a : List[str] = blip_itm(pretrained=lowerCAmelCase_ , image_size=lowerCAmelCase_ , vit='base' ) itm_model.eval() _a : Any = itm_model.state_dict() for key in modified_state_dict.copy(): _a : Optional[int] = modified_state_dict.pop(lowerCAmelCase_ ) _a : Any = rename_key(lowerCAmelCase_ ) _a : Dict = value _a : Tuple = BlipForImageTextRetrieval(lowerCAmelCase_ ) _a : Any = ['A picture of a woman with a dog sitting in a beach'] _a : List[str] = tokenizer( lowerCAmelCase_ , return_tensors='pt' , padding='max_length' , truncation=lowerCAmelCase_ , max_length=35 , ).input_ids hf_itm_model.load_state_dict(lowerCAmelCase_ ) hf_itm_model.eval() _a : str = hf_itm_model(lowerCAmelCase_ , lowerCAmelCase_ , use_itm_head=lowerCAmelCase_ ) _a : Any = hf_itm_model(lowerCAmelCase_ , lowerCAmelCase_ , use_itm_head=lowerCAmelCase_ ) assert out[0].item() == 0.2_110_687_494_277_954 assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.45_698_845_386_505_127 if pytorch_dump_folder_path is not None: hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' ) if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') __lowerCAmelCase = parser.parse_args() convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
code_codestyle: 89

style_context:
import os
import tempfile
import unittest

from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property


@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
style_context_codestyle: 65
label: 0

--- Row 8 ---

code:
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
code_codestyle: 363

style_context:
import os

# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    """Count the triangle words in words.txt: a word is triangular when the
    sum of its letter values (A=1, ..., Z=26) is a triangular number."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)


if __name__ == "__main__":
    print(solution())
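For instance, the letter-value arithmetic above maps "SKY" to 19 + 11 + 25 = 55, which is the tenth triangular number:

word = "SKY"
value = sum(ord(ch) - 64 for ch in word)    # A=1 ... Z=26
print(value, value in TRIANGULAR_NUMBERS)   # 55 True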
style_context_codestyle: 127
label: 0

--- Row 9 ---

code:
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE : Optional[Any] = '''▁''' _SCREAMING_SNAKE_CASE : Optional[Any] = {'''vocab_file''': '''spiece.model'''} _SCREAMING_SNAKE_CASE : int = { '''vocab_file''': { '''google/reformer-crime-and-punishment''': ( '''https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model''' ) } } _SCREAMING_SNAKE_CASE : List[Any] = { '''google/reformer-crime-and-punishment''': 52_4288, } class a ( __snake_case ): SCREAMING_SNAKE_CASE : int = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE : Any = ["""input_ids""", """attention_mask"""] def __init__( self : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any]="</s>" , __SCREAMING_SNAKE_CASE : int="<unk>" , __SCREAMING_SNAKE_CASE : Any=[] , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **__SCREAMING_SNAKE_CASE : Optional[Any] , ) -> None: lowerCamelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , ) lowerCamelCase_ = vocab_file lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__SCREAMING_SNAKE_CASE ) @property def UpperCamelCase ( self : Union[str, Any] ) -> Tuple: return self.sp_model.get_piece_size() def UpperCamelCase ( self : Dict ) -> Dict[str, int]: lowerCamelCase_ = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Tuple ) -> Any: lowerCamelCase_ = self.__dict__.copy() lowerCamelCase_ = None return state def __setstate__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple: lowerCamelCase_ = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): lowerCamelCase_ = {} lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : str ) -> List[str]: return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE ) def UpperCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : List[str] ) -> Tuple: return self.sp_model.piece_to_id(__SCREAMING_SNAKE_CASE ) def UpperCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[Any]: if index < self.sp_model.get_piece_size(): lowerCamelCase_ = self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE ) return token def UpperCamelCase ( self : str , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple: lowerCamelCase_ = [] lowerCamelCase_ = '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token lowerCamelCase_ = [] else: current_sub_tokens.append(__SCREAMING_SNAKE_CASE ) out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) return out_string.strip() def UpperCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : str , 
__SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__SCREAMING_SNAKE_CASE ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCamelCase_ = os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(__SCREAMING_SNAKE_CASE , 'wb' ) as fi: lowerCamelCase_ = self.sp_model.serialized_model_proto() fi.write(__SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
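The tokenizer in this row is a thin wrapper over a SentencePiece model; a minimal sketch of the underlying API it calls (the model path is a placeholder):

import sentencepiece as spm

sp_model = spm.SentencePieceProcessor()
sp_model.Load("spiece.model")                         # placeholder path to a trained model
print(sp_model.encode("Hello world", out_type=str))   # subword pieces such as ['▁Hello', '▁world']
print(sp_model.get_piece_size())                      # vocabulary size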
code_codestyle: 183

style_context:
"""simple docstring""" import itertools import random import unittest import numpy as np from transformers import BatchFeature, SpeechTaFeatureExtractor from transformers.testing_utils import require_torch from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch _SCREAMING_SNAKE_CASE : Tuple = random.Random() def lowerCamelCase__ ( _lowerCamelCase : List[Any] , _lowerCamelCase : List[Any]=1.0 , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : str=None ) -> List[str]: if rng is None: lowerCamelCase_ = global_rng lowerCamelCase_ = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch class a ( unittest.TestCase ): def __init__( self : Any , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple=7 , __SCREAMING_SNAKE_CASE : Union[str, Any]=400 , __SCREAMING_SNAKE_CASE : int=2000 , __SCREAMING_SNAKE_CASE : List[Any]=1 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.0 , __SCREAMING_SNAKE_CASE : Dict=16000 , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : List[Any]=80 , __SCREAMING_SNAKE_CASE : Union[str, Any]=16 , __SCREAMING_SNAKE_CASE : int=64 , __SCREAMING_SNAKE_CASE : Tuple="hann_window" , __SCREAMING_SNAKE_CASE : Dict=80 , __SCREAMING_SNAKE_CASE : List[str]=7600 , __SCREAMING_SNAKE_CASE : List[str]=1e-1_0 , __SCREAMING_SNAKE_CASE : Any=True , ) -> List[str]: lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = min_seq_length lowerCamelCase_ = max_seq_length lowerCamelCase_ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) lowerCamelCase_ = feature_size lowerCamelCase_ = padding_value lowerCamelCase_ = sampling_rate lowerCamelCase_ = do_normalize lowerCamelCase_ = num_mel_bins lowerCamelCase_ = hop_length lowerCamelCase_ = win_length lowerCamelCase_ = win_function lowerCamelCase_ = fmin lowerCamelCase_ = fmax lowerCamelCase_ = mel_floor lowerCamelCase_ = return_attention_mask def UpperCamelCase ( self : List[Any] ) -> List[Any]: return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "do_normalize": self.do_normalize, "num_mel_bins": self.num_mel_bins, "hop_length": self.hop_length, "win_length": self.win_length, "win_function": self.win_function, "fmin": self.fmin, "fmax": self.fmax, "mel_floor": self.mel_floor, "return_attention_mask": self.return_attention_mask, } def UpperCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : Optional[Any]=False ) -> str: def _flatten(__SCREAMING_SNAKE_CASE : Any ): return list(itertools.chain(*__SCREAMING_SNAKE_CASE ) ) if equal_length: lowerCamelCase_ = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size lowerCamelCase_ = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: lowerCamelCase_ = [np.asarray(__SCREAMING_SNAKE_CASE ) for x in speech_inputs] return speech_inputs def UpperCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : Any=False ) -> int: if equal_length: lowerCamelCase_ = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size lowerCamelCase_ = [ floats_list((x, self.num_mel_bins) 
) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: lowerCamelCase_ = [np.asarray(__SCREAMING_SNAKE_CASE ) for x in speech_inputs] return speech_inputs @require_torch class a ( __snake_case , unittest.TestCase ): SCREAMING_SNAKE_CASE : Any = SpeechTaFeatureExtractor def UpperCamelCase ( self : Any ) -> Any: lowerCamelCase_ = SpeechTaFeatureExtractionTester(self ) def UpperCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : int ) -> List[Any]: self.assertTrue(np.all(np.mean(__SCREAMING_SNAKE_CASE , axis=0 ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(__SCREAMING_SNAKE_CASE , axis=0 ) - 1 ) < 1e-3 ) ) def UpperCamelCase ( self : List[Any] ) -> int: # Tests that all call wrap to encode_plus and batch_encode_plus lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] lowerCamelCase_ = [np.asarray(__SCREAMING_SNAKE_CASE ) for speech_input in speech_inputs] # Test not batched input lowerCamelCase_ = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values lowerCamelCase_ = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) ) # Test batched lowerCamelCase_ = feat_extract(__SCREAMING_SNAKE_CASE , return_tensors='np' ).input_values lowerCamelCase_ = feat_extract(__SCREAMING_SNAKE_CASE , return_tensors='np' ).input_values for enc_seq_a, enc_seq_a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) ) def UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]: lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] lowerCamelCase_ = ['longest', 'max_length', 'do_not_pad'] lowerCamelCase_ = [None, 1600, None] for max_length, padding in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): lowerCamelCase_ = feat_extract(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , return_tensors='np' ) lowerCamelCase_ = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) self.assertTrue(input_values[0][800:].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_values[1][:1000] ) self.assertTrue(input_values[0][1000:].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def UpperCamelCase ( self : Dict ) -> Dict: lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase_ = range(800 , 1400 , 200 ) lowerCamelCase_ = [floats_list((1, x) )[0] for x in lengths] lowerCamelCase_ = ['longest', 'max_length', 'do_not_pad'] lowerCamelCase_ = [None, 1600, None] for max_length, padding in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): lowerCamelCase_ = feat_extract(__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE ) lowerCamelCase_ = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) self._check_zero_mean_unit_variance(input_values[1][:1000] ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def UpperCamelCase ( self : Tuple ) -> Optional[int]: lowerCamelCase_ = 
self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] lowerCamelCase_ = feat_extract( __SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=1000 , padding='max_length' , return_tensors='np' ) lowerCamelCase_ = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1] ) self._check_zero_mean_unit_variance(input_values[2] ) def UpperCamelCase ( self : List[str] ) -> List[Any]: lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] lowerCamelCase_ = feat_extract( __SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=1000 , padding='longest' , return_tensors='np' ) lowerCamelCase_ = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertTrue(input_values.shape == (3, 1000) ) lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] lowerCamelCase_ = feat_extract( __SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=2000 , padding='longest' , return_tensors='np' ) lowerCamelCase_ = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length > longest -> then pad to longest self.assertTrue(input_values.shape == (3, 1200) ) def UpperCamelCase ( self : Union[str, Any] ) -> List[Any]: lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase_ = np.random.rand(100 ).astype(np.floataa ) lowerCamelCase_ = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: lowerCamelCase_ = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) lowerCamelCase_ = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def UpperCamelCase ( self : List[Any] ) -> Tuple: # Tests that all call wrap to encode_plus and batch_encode_plus lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] lowerCamelCase_ = [np.asarray(__SCREAMING_SNAKE_CASE ) for speech_input in speech_inputs] # Test feature size lowerCamelCase_ = feature_extractor(audio_target=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors='np' ).input_values self.assertTrue(input_values.ndim == 3 ) self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins ) # Test not batched input lowerCamelCase_ = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_values lowerCamelCase_ = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_values self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) ) # Test batched lowerCamelCase_ = feature_extractor(__SCREAMING_SNAKE_CASE , 
return_tensors='np' ).input_values lowerCamelCase_ = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors='np' ).input_values for enc_seq_a, enc_seq_a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. lowerCamelCase_ = [floats_list((1, x) )[0] for x in (800, 800, 800)] lowerCamelCase_ = np.asarray(__SCREAMING_SNAKE_CASE ) lowerCamelCase_ = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors='np' ).input_values lowerCamelCase_ = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors='np' ).input_values for enc_seq_a, enc_seq_a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) ) def UpperCamelCase ( self : int ) -> Union[str, Any]: lowerCamelCase_ = self.feat_extract_tester.prepare_inputs_for_target() lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_dict ) lowerCamelCase_ = feat_extract.model_input_names[0] lowerCamelCase_ = BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(__SCREAMING_SNAKE_CASE ) == len(__SCREAMING_SNAKE_CASE ) for x, y in zip(__SCREAMING_SNAKE_CASE , processed_features[input_name] ) ) ) lowerCamelCase_ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=__SCREAMING_SNAKE_CASE ) lowerCamelCase_ = BatchFeature({input_name: speech_inputs} , tensor_type='np' ) lowerCamelCase_ = processed_features[input_name] if len(batch_features_input.shape ) < 3: lowerCamelCase_ = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) ) @require_torch def UpperCamelCase ( self : Tuple ) -> Any: lowerCamelCase_ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=__SCREAMING_SNAKE_CASE ) lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_dict ) lowerCamelCase_ = feat_extract.model_input_names[0] lowerCamelCase_ = BatchFeature({input_name: speech_inputs} , tensor_type='pt' ) lowerCamelCase_ = processed_features[input_name] if len(batch_features_input.shape ) < 3: lowerCamelCase_ = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) ) @require_torch def UpperCamelCase ( self : List[Any] ) -> List[Any]: lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_dict ) lowerCamelCase_ = self.feat_extract_tester.prepare_inputs_for_target() lowerCamelCase_ = feat_extract.model_input_names[0] lowerCamelCase_ = BatchFeature({input_name: speech_inputs} ) lowerCamelCase_ = feat_extract.num_mel_bins # hack! 
lowerCamelCase_ = feat_extract.pad(__SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='np' )[input_name] lowerCamelCase_ = feat_extract.pad(__SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 ) def UpperCamelCase ( self : Dict ) -> Union[str, Any]: lowerCamelCase_ = self.feat_extract_dict lowerCamelCase_ = True lowerCamelCase_ = self.feature_extraction_class(**__SCREAMING_SNAKE_CASE ) lowerCamelCase_ = self.feat_extract_tester.prepare_inputs_for_target() lowerCamelCase_ = [len(__SCREAMING_SNAKE_CASE ) for x in speech_inputs] lowerCamelCase_ = feat_extract.model_input_names[0] lowerCamelCase_ = BatchFeature({input_name: speech_inputs} ) lowerCamelCase_ = feat_extract.num_mel_bins # hack! lowerCamelCase_ = feat_extract.pad(__SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='np' ) self.assertIn('attention_mask' , __SCREAMING_SNAKE_CASE ) self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) ) self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , __SCREAMING_SNAKE_CASE ) def UpperCamelCase ( self : Optional[Any] ) -> Optional[int]: lowerCamelCase_ = self.feat_extract_dict lowerCamelCase_ = True lowerCamelCase_ = self.feature_extraction_class(**__SCREAMING_SNAKE_CASE ) lowerCamelCase_ = self.feat_extract_tester.prepare_inputs_for_target() lowerCamelCase_ = [len(__SCREAMING_SNAKE_CASE ) for x in speech_inputs] lowerCamelCase_ = feat_extract.model_input_names[0] lowerCamelCase_ = BatchFeature({input_name: speech_inputs} ) lowerCamelCase_ = min(__SCREAMING_SNAKE_CASE ) lowerCamelCase_ = feat_extract.num_mel_bins # hack! lowerCamelCase_ = feat_extract.pad( __SCREAMING_SNAKE_CASE , padding='max_length' , max_length=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , return_tensors='np' ) self.assertIn('attention_mask' , __SCREAMING_SNAKE_CASE ) self.assertListEqual( list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] ) def UpperCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[str]: from datasets import load_dataset lowerCamelCase_ = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' ) # automatic decoding with librispeech lowerCamelCase_ = ds.sort('id' ).select(range(__SCREAMING_SNAKE_CASE ) )[:num_samples]['audio'] return [x["array"] for x in speech_samples] def UpperCamelCase ( self : str ) -> Dict: # fmt: off lowerCamelCase_ = torch.tensor( [2.3_8_0_4e-0_3, 2.0_7_5_2e-0_3, 1.9_8_3_6e-0_3, 2.1_0_5_7e-0_3, 1.6_1_7_4e-0_3, 3.0_5_1_8e-0_4, 9.1_5_5_3e-0_5, 3.3_5_6_9e-0_4, 9.7_6_5_6e-0_4, 1.8_3_1_1e-0_3, 2.0_1_4_2e-0_3, 2.1_0_5_7e-0_3, 1.7_3_9_5e-0_3, 4.5_7_7_6e-0_4, -3.9_6_7_3e-0_4, 4.5_7_7_6e-0_4, 1.0_0_7_1e-0_3, 9.1_5_5_3e-0_5, 4.8_8_2_8e-0_4, 1.1_5_9_7e-0_3, 7.3_2_4_2e-0_4, 9.4_6_0_4e-0_4, 1.8_0_0_5e-0_3, 1.8_3_1_1e-0_3, 8.8_5_0_1e-0_4, 4.2_7_2_5e-0_4, 4.8_8_2_8e-0_4, 7.3_2_4_2e-0_4, 1.0_9_8_6e-0_3, 2.1_0_5_7e-0_3] ) # fmt: on lowerCamelCase_ = self._load_datasamples(1 ) lowerCamelCase_ = SpeechTaFeatureExtractor() lowerCamelCase_ = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors='pt' ).input_values self.assertEquals(input_values.shape , (1, 93680) ) self.assertTrue(torch.allclose(input_values[0, :30] , __SCREAMING_SNAKE_CASE , atol=1e-6 
) ) def UpperCamelCase ( self : List[Any] ) -> Optional[int]: # fmt: off lowerCamelCase_ = torch.tensor( [-2.6_870, -3.0_104, -3.1_356, -3.5_352, -3.0_044, -3.0_353, -3.4_719, -3.6_777, -3.1_520, -2.9_435, -2.6_553, -2.8_795, -2.9_944, -2.5_921, -3.0_279, -3.0_386, -3.0_864, -3.1_291, -3.2_353, -2.7_444, -2.6_831, -2.7_287, -3.1_761, -3.1_571, -3.2_726, -3.0_582, -3.1_007, -3.4_533, -3.4_695, -3.0_998] ) # fmt: on lowerCamelCase_ = self._load_datasamples(1 ) lowerCamelCase_ = SpeechTaFeatureExtractor() lowerCamelCase_ = feature_extractor(audio_target=__SCREAMING_SNAKE_CASE , return_tensors='pt' ).input_values self.assertEquals(input_values.shape , (1, 366, 80) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
183
1
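The test file above exercises SpeechT5's feature extractor; its identifiers are style-perturbed, and upstream the class ships in `transformers` as `SpeechT5FeatureExtractor`. A minimal usage sketch of the two branches the tests cover (waveform inputs and log-mel targets), assuming `transformers` and `numpy` are installed:

import numpy as np
from transformers import SpeechT5FeatureExtractor

feature_extractor = SpeechT5FeatureExtractor()  # default config, as in the integration tests above
waveform = np.random.rand(16_000).astype(np.float32)  # one second of dummy 16 kHz audio

# Waveform branch: zero-mean / unit-variance input_values for the encoder.
inputs = feature_extractor(waveform, sampling_rate=16_000, return_tensors="np")
print(inputs.input_values.shape)  # (1, 16000)

# Target branch: log-mel filterbank features, shape (batch, frames, num_mel_bins=80).
targets = feature_extractor(audio_target=waveform, sampling_rate=16_000, return_tensors="np")
print(targets.input_values.shape)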
'''simple docstring'''
import argparse
import shutil
from pathlib import Path

from tqdm import tqdm

from transformers import AutoTokenizer


def pack_examples(tok, src_examples, tgt_examples, max_tokens=1_024):
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(text):
        return tok(text, return_tensors='pt').input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + ' ' + src
        cand_tgt = new_tgt + ' ' + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open('w').write('\n'.join(packed_src))
        Path(save_path / f"{split}.target").open('w').write('\n'.join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument('--tok_name', type=str, help='like facebook/bart-large-cnn,t5-base, etc.')
    parser.add_argument('--max_seq_len', type=int, default=128)
    parser.add_argument('--data_dir', type=str)
    parser.add_argument('--save_path', type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
294
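The script above greedily concatenates consecutive (source, target) pairs until either side would exceed a tokenized length budget. A download-free restatement of that idea, with a whitespace token count standing in for the HF tokenizer (all names here are illustrative):

def pack_pairs(srcs, tgts, max_tokens=5):
    # Greedy packing: grow the current pair until adding the next one would
    # push either side past max_tokens, then start a new pair.
    finished_src, finished_tgt = [], []
    new_src, new_tgt = srcs[0], tgts[0]

    def too_big(text):
        return len(text.split()) > max_tokens

    for src, tgt in zip(srcs[1:], tgts[1:]):
        cand_src, cand_tgt = new_src + " " + src, new_tgt + " " + tgt
        if too_big(cand_src) or too_big(cand_tgt):
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:
            new_src, new_tgt = cand_src, cand_tgt
    finished_src.append(new_src)
    finished_tgt.append(new_tgt)
    return finished_src, finished_tgt

print(pack_pairs(["a b c", "d e", "f g h i"], ["x", "y", "z"]))
# (['a b c d e', 'f g h i'], ['x y', 'z'])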
'''simple docstring''' from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig __lowercase : str = logging.get_logger(__name__) # General docstring __lowercase : List[str] = 'MobileNetV1Config' # Base docstring __lowercase : Tuple = 'google/mobilenet_v1_1.0_224' __lowercase : List[Any] = [1, 10_24, 7, 7] # Image classification docstring __lowercase : int = 'google/mobilenet_v1_1.0_224' __lowercase : Any = 'tabby, tabby cat' __lowercase : Dict = [ 'google/mobilenet_v1_1.0_224', 'google/mobilenet_v1_0.75_192', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[Any]=None ): __a : Dict = {} if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __a : Optional[Any] = model.mobilenet_va else: __a : List[Any] = model __a : Dict = 'MobilenetV1/Conv2d_0/' __a : Dict = backbone.conv_stem.convolution.weight __a : Optional[Any] = backbone.conv_stem.normalization.bias __a : int = backbone.conv_stem.normalization.weight __a : int = backbone.conv_stem.normalization.running_mean __a : Tuple = backbone.conv_stem.normalization.running_var for i in range(13 ): __a : int = i + 1 __a : Dict = i * 2 __a : Dict = backbone.layer[pt_index] __a : Dict = F"""MobilenetV1/Conv2d_{tf_index}_depthwise/""" __a : Union[str, Any] = pointer.convolution.weight __a : Optional[Any] = pointer.normalization.bias __a : Union[str, Any] = pointer.normalization.weight __a : List[Any] = pointer.normalization.running_mean __a : Tuple = pointer.normalization.running_var __a : List[str] = backbone.layer[pt_index + 1] __a : Optional[Any] = F"""MobilenetV1/Conv2d_{tf_index}_pointwise/""" __a : Optional[int] = pointer.convolution.weight __a : List[str] = pointer.normalization.bias __a : Dict = pointer.normalization.weight __a : Dict = pointer.normalization.running_mean __a : Optional[int] = pointer.normalization.running_var if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __a : Any = 'MobilenetV1/Logits/Conv2d_1c_1x1/' __a : Optional[int] = model.classifier.weight __a : List[Any] = model.classifier.bias return tf_to_pt_map def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict ): try: import numpy as np import tensorflow as tf except ImportError: logger.error( 'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see ' 'https://www.tensorflow.org/install/ for installation instructions.' 
) raise # Load weights from TF model __a : Union[str, Any] = tf.train.list_variables(_SCREAMING_SNAKE_CASE ) __a : Optional[int] = {} for name, shape in init_vars: logger.info(F"""Loading TF weight {name} with shape {shape}""" ) __a : List[str] = tf.train.load_variable(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a : Optional[Any] = array # Build TF to PyTorch weights loading map __a : Optional[int] = _build_tf_to_pytorch_map(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for name, pointer in tf_to_pt_map.items(): logger.info(F"""Importing {name}""" ) if name not in tf_weights: logger.info(F"""{name} not in tf pre-trained weights, skipping""" ) continue __a : Union[str, Any] = tf_weights[name] if "depthwise_weights" in name: logger.info('Transposing depthwise' ) __a : Optional[Any] = np.transpose(_SCREAMING_SNAKE_CASE , (2, 3, 0, 1) ) elif "weights" in name: logger.info('Transposing' ) if len(pointer.shape ) == 2: # copying into linear layer __a : Union[str, Any] = array.squeeze().transpose() else: __a : Dict = np.transpose(_SCREAMING_SNAKE_CASE , (3, 2, 0, 1) ) if pointer.shape != array.shape: raise ValueError(F"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" ) logger.info(F"""Initialize PyTorch weight {name} {array.shape}""" ) __a : List[str] = torch.from_numpy(_SCREAMING_SNAKE_CASE ) tf_weights.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) tf_weights.pop(name + '/RMSProp' , _SCREAMING_SNAKE_CASE ) tf_weights.pop(name + '/RMSProp_1' , _SCREAMING_SNAKE_CASE ) tf_weights.pop(name + '/ExponentialMovingAverage' , _SCREAMING_SNAKE_CASE ) logger.info(F"""Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}""" ) return model def lowerCamelCase (_SCREAMING_SNAKE_CASE : torch.Tensor , _SCREAMING_SNAKE_CASE : nn.Convad ): __a , __a : Any = features.shape[-2:] __a , __a : int = conv_layer.stride __a , __a : Any = conv_layer.kernel_size if in_height % stride_height == 0: __a : int = max(kernel_height - stride_height , 0 ) else: __a : int = max(kernel_height - (in_height % stride_height) , 0 ) if in_width % stride_width == 0: __a : Any = max(kernel_width - stride_width , 0 ) else: __a : str = max(kernel_width - (in_width % stride_width) , 0 ) __a : int = pad_along_width // 2 __a : Dict = pad_along_width - pad_left __a : List[str] = pad_along_height // 2 __a : Union[str, Any] = pad_along_height - pad_top __a : str = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'constant' , 0.0 ) class __UpperCamelCase ( nn.Module ): def __init__( self , __a , __a , __a , __a , __a = 1 , __a = 1 , __a = False , __a = True , __a = True , ): '''simple docstring''' super().__init__() __a : Optional[int] = config if in_channels % groups != 0: raise ValueError(f"""Input channels ({in_channels}) are not divisible by {groups} groups.""" ) if out_channels % groups != 0: raise ValueError(f"""Output channels ({out_channels}) are not divisible by {groups} groups.""" ) __a : Dict = 0 if config.tf_padding else int((kernel_size - 1) / 2 ) __a : Union[str, Any] = nn.Convad( in_channels=__a , out_channels=__a , kernel_size=__a , stride=__a , padding=__a , groups=__a , bias=__a , padding_mode='zeros' , ) if use_normalization: __a : List[str] = nn.BatchNormad( num_features=__a , eps=config.layer_norm_eps , momentum=0.9997 , affine=__a , track_running_stats=__a , ) else: __a : Tuple = None if use_activation: if isinstance(__a , __a ): __a : Tuple = ACTaFN[use_activation] elif 
isinstance(config.hidden_act , __a ): __a : Union[str, Any] = ACTaFN[config.hidden_act] else: __a : Dict = config.hidden_act else: __a : List[Any] = None def __UpperCAmelCase ( self , __a ): '''simple docstring''' if self.config.tf_padding: __a : Union[str, Any] = apply_tf_padding(__a , self.convolution ) __a : Union[str, Any] = self.convolution(__a ) if self.normalization is not None: __a : str = self.normalization(__a ) if self.activation is not None: __a : Optional[int] = self.activation(__a ) return features class __UpperCamelCase ( lowerCAmelCase_ ): A_ = MobileNetVaConfig A_ = load_tf_weights_in_mobilenet_va A_ = "mobilenet_v1" A_ = "pixel_values" A_ = False def __UpperCAmelCase ( self , __a ): '''simple docstring''' if isinstance(__a , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(__a , nn.BatchNormad ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) __lowercase : Any = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' __lowercase : Optional[int] = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." 
, lowerCAmelCase_ , ) class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self , __a , __a = True ): '''simple docstring''' super().__init__(__a ) __a : Optional[int] = config __a : str = 32 __a : Dict = max(int(depth * config.depth_multiplier ) , config.min_depth ) __a : Union[str, Any] = MobileNetVaConvLayer( __a , in_channels=config.num_channels , out_channels=__a , kernel_size=3 , stride=2 , ) __a : Tuple = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] __a : Any = nn.ModuleList() for i in range(13 ): __a : Union[str, Any] = out_channels if strides[i] == 2 or i == 0: depth *= 2 __a : List[Any] = max(int(depth * config.depth_multiplier ) , config.min_depth ) self.layer.append( MobileNetVaConvLayer( __a , in_channels=__a , out_channels=__a , kernel_size=3 , stride=strides[i] , groups=__a , ) ) self.layer.append( MobileNetVaConvLayer( __a , in_channels=__a , out_channels=__a , kernel_size=1 , ) ) __a : Optional[int] = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def __UpperCAmelCase ( self , __a ): '''simple docstring''' raise NotImplementedError @add_start_docstrings_to_model_forward(__a ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__a , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def __UpperCAmelCase ( self , __a = None , __a = None , __a = None , ): '''simple docstring''' __a : Dict = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __a : int = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values' ) __a : Union[str, Any] = self.conv_stem(__a ) __a : Any = () if output_hidden_states else None for i, layer_module in enumerate(self.layer ): __a : List[str] = layer_module(__a ) if output_hidden_states: __a : List[Any] = all_hidden_states + (hidden_states,) __a : str = hidden_states if self.pooler is not None: __a : Union[str, Any] = torch.flatten(self.pooler(__a ) , start_dim=1 ) else: __a : int = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None ) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=__a , pooler_output=__a , hidden_states=__a , ) @add_start_docstrings( "\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , lowerCAmelCase_ , ) class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self , __a ): '''simple docstring''' super().__init__(__a ) __a : Tuple = config.num_labels __a : Tuple = MobileNetVaModel(__a ) __a : Optional[int] = self.mobilenet_va.layer[-1].convolution.out_channels # Classifier head __a : Any = nn.Dropout(config.classifier_dropout_prob , inplace=__a ) __a : Any = nn.Linear(__a , config.num_labels ) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__a ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def __UpperCAmelCase ( self , __a = None , __a = None , __a = None , __a = None , ): '''simple docstring''' __a : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict __a : Dict = self.mobilenet_va(__a , output_hidden_states=__a , return_dict=__a ) __a : List[str] = outputs.pooler_output if return_dict else outputs[1] __a : int = self.classifier(self.dropout(__a ) ) __a : Tuple = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: __a : str = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): __a : int = 'single_label_classification' else: __a : Optional[Any] = 'multi_label_classification' if self.config.problem_type == "regression": __a : Optional[Any] = MSELoss() if self.num_labels == 1: __a : List[Any] = loss_fct(logits.squeeze() , labels.squeeze() ) else: __a : Any = loss_fct(__a , __a ) elif self.config.problem_type == "single_label_classification": __a : List[str] = CrossEntropyLoss() __a : str = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": __a : Tuple = BCEWithLogitsLoss() __a : Optional[int] = loss_fct(__a , __a ) if not return_dict: __a : List[Any] = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=__a , logits=__a , hidden_states=outputs.hidden_states , )
294
1
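The modeling file above is the MobileNetV1 port from `transformers` with style-perturbed identifiers; upstream the classes are `MobileNetV1Model` and `MobileNetV1ForImageClassification`. A hedged inference sketch against the `google/mobilenet_v1_1.0_224` checkpoint already named in the docstrings (the blank image is a stand-in for a real photo):

import torch
from PIL import Image
from transformers import AutoImageProcessor, MobileNetV1ForImageClassification

processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")

image = Image.new("RGB", (224, 224))  # placeholder input
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (1, num_labels)
print(model.config.id2label[logits.argmax(-1).item()])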
"""simple docstring""" import inspect import unittest from transformers import DPTConfig from transformers.file_utils import is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class a : def __init__( self : Any , __lowerCAmelCase : Any , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : int=32 , __lowerCAmelCase : str=16 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Union[str, Any]=32 , __lowerCAmelCase : Optional[int]=4 , __lowerCAmelCase : Union[str, Any]=[0, 1, 2, 3] , __lowerCAmelCase : Tuple=4 , __lowerCAmelCase : str=37 , __lowerCAmelCase : Optional[Any]="gelu" , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Dict=0.02 , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : Dict=[1, 384, 24, 24] , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Any=None , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = backbone_out_indices _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = backbone_featmap_shape _UpperCAmelCase = scope _UpperCAmelCase = is_hybrid # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token) _UpperCAmelCase = (image_size // patch_size) ** 2 _UpperCAmelCase = num_patches + 1 def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) _UpperCAmelCase = self.get_config() return config, pixel_values, labels def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = { 'global_padding': 'same', 'layer_type': 'bottleneck', 'depths': [3, 4, 9], 'out_features': ['stage1', 'stage2', 'stage3'], 'embedding_dynamic_padding': True, 'hidden_sizes': [96, 192, 384, 768], 'num_groups': 2, } return DPTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCAmelCase , 
initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__lowerCAmelCase , backbone_featmap_shape=self.backbone_featmap_shape , ) def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] ): _UpperCAmelCase = DPTModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : int ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = DPTForDepthEstimation(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) ) def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = DPTForSemanticSegmentation(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class a ( __snake_case , __snake_case , unittest.TestCase ): _snake_case : Optional[Any] = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else () _snake_case : List[Any] = ( { """depth-estimation""": DPTForDepthEstimation, """feature-extraction""": DPTModel, """image-segmentation""": DPTForSemanticSegmentation, } if is_torch_available() else {} ) _snake_case : Any = False _snake_case : Union[str, Any] = False _snake_case : List[Any] = False def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = DPTModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=37 ) def lowerCAmelCase_ ( self : List[Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="""DPT does not use inputs_embeds""" ) def lowerCAmelCase_ ( self : List[str] ): pass def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(__lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _UpperCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCAmelCase , nn.Linear ) ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(__lowerCAmelCase ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def 
lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[str] ): for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = True if model_class in get_values(__lowerCAmelCase ): continue _UpperCAmelCase = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() _UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) _UpperCAmelCase = model(**__lowerCAmelCase ).loss loss.backward() def lowerCAmelCase_ ( self : Any ): for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = False _UpperCAmelCase = True if model_class in get_values(__lowerCAmelCase ) or not model_class.supports_gradient_checkpointing: continue _UpperCAmelCase = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.gradient_checkpointing_enable() model.train() _UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) _UpperCAmelCase = model(**__lowerCAmelCase ).loss loss.backward() def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = _config_zero_init(__lowerCAmelCase ) for model_class in self.all_model_classes: _UpperCAmelCase = model_class(config=__lowerCAmelCase ) # Skip the check for the backbone _UpperCAmelCase = [] for name, module in model.named_modules(): if module.__class__.__name__ == "DPTViTHybridEmbeddings": _UpperCAmelCase = [f'''{name}.{key}''' for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowerCAmelCase_ ( self : List[str] ): pass @slow def lowerCAmelCase_ ( self : Any ): for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]: _UpperCAmelCase = DPTModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def lowerCAmelCase_ ( self : Tuple ): # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = 'add' with self.assertRaises(__lowerCAmelCase ): _UpperCAmelCase = DPTForDepthEstimation(__lowerCAmelCase ) def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision @slow class a ( unittest.TestCase ): def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" ) _UpperCAmelCase = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__lowerCAmelCase ) _UpperCAmelCase = 
prepare_img() _UpperCAmelCase = image_processor(images=__lowerCAmelCase , return_tensors="""pt""" ).to(__lowerCAmelCase ) # forward pass with torch.no_grad(): _UpperCAmelCase = model(**__lowerCAmelCase ) _UpperCAmelCase = outputs.predicted_depth # verify the predicted depth _UpperCAmelCase = torch.Size((1, 384, 384) ) self.assertEqual(predicted_depth.shape , __lowerCAmelCase ) _UpperCAmelCase = torch.tensor( [[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , __lowerCAmelCase , atol=1e-4 ) )
289
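The integration test above pins down the depth-estimation path end to end; a standalone sketch using the same classes and checkpoint it references (`Intel/dpt-hybrid-midas`):

import torch
from PIL import Image
from transformers import DPTForDepthEstimation, DPTImageProcessor

processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas")

image = Image.new("RGB", (384, 384))  # placeholder; use a real image in practice
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    predicted_depth = model(**inputs).predicted_depth
print(predicted_depth.shape)  # torch.Size([1, 384, 384]) for this checkpoint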
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


snake_case_ = logging.get_logger(__name__)


class SCREAMING_SNAKE_CASE__ (FlavaImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use FlavaImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
214
0
from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL UpperCAmelCase__ : List[str] =logging.get_logger(__name__) def _lowercase ( _UpperCAmelCase ) -> List[List[ImageInput]]: if isinstance(_UpperCAmelCase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(_UpperCAmelCase , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(_UpperCAmelCase ): return [[videos]] raise ValueError(F"""Could not make batched video from {videos}""" ) class __A ( a ): __A = ["""pixel_values"""] def __init__( self , UpperCAmelCase_ = True , UpperCAmelCase_ = None , UpperCAmelCase_ = PILImageResampling.BILINEAR , UpperCAmelCase_ = True , UpperCAmelCase_ = None , UpperCAmelCase_ = True , UpperCAmelCase_ = 1 / 255 , UpperCAmelCase_ = True , UpperCAmelCase_ = True , UpperCAmelCase_ = None , UpperCAmelCase_ = None , **UpperCAmelCase_ , ): super().__init__(**UpperCAmelCase_ ) lowerCamelCase =size if size is not None else {"""shortest_edge""": 256} lowerCamelCase =get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_ ) lowerCamelCase =crop_size if crop_size is not None else {"""height""": 224, """width""": 224} lowerCamelCase =get_size_dict(UpperCAmelCase_ , param_name="""crop_size""" ) lowerCamelCase =do_resize lowerCamelCase =size lowerCamelCase =do_center_crop lowerCamelCase =crop_size lowerCamelCase =resample lowerCamelCase =do_rescale lowerCamelCase =rescale_factor lowerCamelCase =offset lowerCamelCase =do_normalize lowerCamelCase =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowerCamelCase =image_std if image_std is not None else IMAGENET_STANDARD_STD def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = PILImageResampling.BILINEAR , UpperCAmelCase_ = None , **UpperCAmelCase_ , ): lowerCamelCase =get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_ ) if "shortest_edge" in size: lowerCamelCase =get_resize_output_image_size(UpperCAmelCase_ , size["""shortest_edge"""] , default_to_square=UpperCAmelCase_ ) elif "height" in size and "width" in size: lowerCamelCase =(size["""height"""], size["""width"""]) else: raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" ) return resize(UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ ) def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = None , **UpperCAmelCase_ , ): lowerCamelCase =get_size_dict(UpperCAmelCase_ ) if "height" not in size or "width" not in size: raise ValueError(f"""Size must have 'height' and 'width' as keys. 
Got {size.keys()}""" ) return center_crop(UpperCAmelCase_ , size=(size["""height"""], size["""width"""]) , data_format=UpperCAmelCase_ , **UpperCAmelCase_ ) def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = True , UpperCAmelCase_ = None , **UpperCAmelCase_ , ): lowerCamelCase =image.astype(np.floataa ) if offset: lowerCamelCase =image - (scale / 2) return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ ) def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = None , **UpperCAmelCase_ , ): return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ ) def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = ChannelDimension.FIRST , ): if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) if offset and not do_rescale: raise ValueError("""For offset, do_rescale must also be set to True.""" ) # All transformations expect numpy arrays. 
lowerCamelCase =to_numpy_array(UpperCAmelCase_ ) if do_resize: lowerCamelCase =self.resize(image=UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ ) if do_center_crop: lowerCamelCase =self.center_crop(UpperCAmelCase_ , size=UpperCAmelCase_ ) if do_rescale: lowerCamelCase =self.rescale(image=UpperCAmelCase_ , scale=UpperCAmelCase_ , offset=UpperCAmelCase_ ) if do_normalize: lowerCamelCase =self.normalize(image=UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ ) lowerCamelCase =to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_ ) return image def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = ChannelDimension.FIRST , **UpperCAmelCase_ , ): lowerCamelCase =do_resize if do_resize is not None else self.do_resize lowerCamelCase =resample if resample is not None else self.resample lowerCamelCase =do_center_crop if do_center_crop is not None else self.do_center_crop lowerCamelCase =do_rescale if do_rescale is not None else self.do_rescale lowerCamelCase =rescale_factor if rescale_factor is not None else self.rescale_factor lowerCamelCase =offset if offset is not None else self.offset lowerCamelCase =do_normalize if do_normalize is not None else self.do_normalize lowerCamelCase =image_mean if image_mean is not None else self.image_mean lowerCamelCase =image_std if image_std is not None else self.image_std lowerCamelCase =size if size is not None else self.size lowerCamelCase =get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_ ) lowerCamelCase =crop_size if crop_size is not None else self.crop_size lowerCamelCase =get_size_dict(UpperCAmelCase_ , param_name="""crop_size""" ) if not valid_images(UpperCAmelCase_ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) lowerCamelCase =make_batched(UpperCAmelCase_ ) lowerCamelCase =[ [ self._preprocess_image( image=UpperCAmelCase_ , do_resize=UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ , do_center_crop=UpperCAmelCase_ , crop_size=UpperCAmelCase_ , do_rescale=UpperCAmelCase_ , rescale_factor=UpperCAmelCase_ , offset=UpperCAmelCase_ , do_normalize=UpperCAmelCase_ , image_mean=UpperCAmelCase_ , image_std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , ) for img in video ] for video in videos ] lowerCamelCase ={"""pixel_values""": videos} return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_ )
262
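The video processor above appears to match `transformers`' `VivitImageProcessor` (note the `offset` option, which shifts rescaled pixels to roughly [-1, 1]). A usage sketch under that assumption; the default output size follows from the `shortest_edge: 256` resize plus the 224x224 center crop:

import numpy as np
from transformers import VivitImageProcessor

processor = VivitImageProcessor()
video = [np.random.randint(0, 255, (360, 640, 3), dtype=np.uint8) for _ in range(8)]  # 8 dummy frames
pixel_values = processor(video, return_tensors="np").pixel_values
print(pixel_values.shape)  # expected (1, 8, 3, 224, 224)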
from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline UpperCAmelCase__ : Dict =logging.get_logger(__name__) # pylint: disable=invalid-name class __A ( a ): def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ ): super().__init__() self.register_modules(unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ ) @torch.no_grad() def __call__( self , UpperCAmelCase_ = 1 , UpperCAmelCase_ = 100 , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = True , ): if audio_length_in_s is None: lowerCamelCase =self.unet.config.sample_size / self.unet.config.sample_rate lowerCamelCase =audio_length_in_s * self.unet.config.sample_rate lowerCamelCase =2 ** len(self.unet.up_blocks ) if sample_size < 3 * down_scale_factor: raise ValueError( f"""{audio_length_in_s} is too small. Make sure it's bigger or equal to""" f""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" ) lowerCamelCase =int(UpperCAmelCase_ ) if sample_size % down_scale_factor != 0: lowerCamelCase =( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled""" f""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising""" """ process.""" ) lowerCamelCase =int(UpperCAmelCase_ ) lowerCamelCase =next(iter(self.unet.parameters() ) ).dtype lowerCamelCase =(batch_size, self.unet.config.in_channels, sample_size) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) and len(UpperCAmelCase_ ) != batch_size: raise ValueError( f"""You have passed a list of generators of length {len(UpperCAmelCase_ )}, but requested an effective batch""" f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" ) lowerCamelCase =randn_tensor(UpperCAmelCase_ , generator=UpperCAmelCase_ , device=self.device , dtype=UpperCAmelCase_ ) # set step values self.scheduler.set_timesteps(UpperCAmelCase_ , device=audio.device ) lowerCamelCase =self.scheduler.timesteps.to(UpperCAmelCase_ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output lowerCamelCase =self.unet(UpperCAmelCase_ , UpperCAmelCase_ ).sample # 2. compute previous image: x_t -> t_t-1 lowerCamelCase =self.scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ).prev_sample lowerCamelCase =audio.clamp(-1 , 1 ).float().cpu().numpy() lowerCamelCase =audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=UpperCAmelCase_ )
262
1
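The pipeline above follows the unconditional audio-generation pattern from `diffusers` (its `__call__` surface matches `DanceDiffusionPipeline`). A usage sketch; the `harmonai/maestro-150k` checkpoint is an assumption taken from the diffusers documentation, and running it downloads the model:

from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("harmonai/maestro-150k")
output = pipe(batch_size=1, num_inference_steps=100, audio_length_in_s=4.0)
print(output.audios.shape)  # (batch, channels, samples); the length is rounded up as the code above logs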
"""simple docstring""" import math import unittest def lowercase ( lowerCAmelCase__ : int ) -> bool: assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and ( number >= 0 ), "'number' must been an int and positive" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(lowerCAmelCase__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self ): self.assertTrue(is_prime(2 ) ) self.assertTrue(is_prime(3 ) ) self.assertTrue(is_prime(5 ) ) self.assertTrue(is_prime(7 ) ) self.assertTrue(is_prime(11 ) ) self.assertTrue(is_prime(13 ) ) self.assertTrue(is_prime(17 ) ) self.assertTrue(is_prime(19 ) ) self.assertTrue(is_prime(23 ) ) self.assertTrue(is_prime(29 ) ) def __UpperCAmelCase ( self ): with self.assertRaises(_a ): is_prime(-19 ) self.assertFalse( is_prime(0 ) , '''Zero doesn\'t have any positive factors, primes must have exactly two.''' , ) self.assertFalse( is_prime(1 ) , '''One only has 1 positive factor, primes must have exactly two.''' , ) self.assertFalse(is_prime(2 * 2 ) ) self.assertFalse(is_prime(2 * 3 ) ) self.assertFalse(is_prime(3 * 3 ) ) self.assertFalse(is_prime(3 * 5 ) ) self.assertFalse(is_prime(3 * 5 * 7 ) ) if __name__ == "__main__": unittest.main()
45
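A quick property check for the 6k±1 trial-division primality test above, comparing it against naive trial division on a small range (assumes the `is_prime` defined above is in scope):

import math

def is_prime_naive(n: int) -> bool:
    # Reference implementation: check every divisor up to sqrt(n).
    if n < 2:
        return False
    return all(n % d for d in range(2, math.isqrt(n) + 1))

for n in range(1_000):
    assert is_prime(n) == is_prime_naive(n), n
print("6k +/- 1 test agrees with naive trial division on 0..999")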
def harmonic_series(n_term: str) -> list:
    """simple docstring"""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
305
0
"""simple docstring""" from abc import ABC, abstractmethod from argparse import ArgumentParser class _A ( lowerCAmelCase ): @staticmethod @abstractmethod def A__ ( __lowerCAmelCase ): """simple docstring""" raise NotImplementedError() @abstractmethod def A__ ( self ): """simple docstring""" raise NotImplementedError()
32
"""simple docstring""" class _A : def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" lowercase = None lowercase = None lowercase = graph self._normalize_graph(__lowerCAmelCase , __lowerCAmelCase ) lowercase = len(__lowerCAmelCase ) lowercase = None def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" if sources is int: lowercase = [sources] if sinks is int: lowercase = [sinks] if len(__lowerCAmelCase ) == 0 or len(__lowerCAmelCase ) == 0: return lowercase = sources[0] lowercase = sinks[0] # make fake vertex if there are more # than one source or sink if len(__lowerCAmelCase ) > 1 or len(__lowerCAmelCase ) > 1: lowercase = 0 for i in sources: max_input_flow += sum(self.graph[i] ) lowercase = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: lowercase = max_input_flow lowercase = 0 lowercase = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: lowercase = max_input_flow lowercase = size - 1 def A__ ( self ): """simple docstring""" if self.maximum_flow_algorithm is None: raise Exception("""You need to set maximum flow algorithm before.""" ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def A__ ( self , __lowerCAmelCase ): """simple docstring""" lowercase = algorithm(self ) class _A : def __init__( self , __lowerCAmelCase ): """simple docstring""" lowercase = flow_network lowercase = flow_network.verticesCount lowercase = flow_network.sourceIndex lowercase = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that lowercase = flow_network.graph lowercase = False def A__ ( self ): """simple docstring""" if not self.executed: self._algorithm() lowercase = True def A__ ( self ): """simple docstring""" pass class _A ( lowerCAmelCase ): def __init__( self , __lowerCAmelCase ): """simple docstring""" super().__init__(__lowerCAmelCase ) # use this to save your result lowercase = -1 def A__ ( self ): """simple docstring""" if not self.executed: raise Exception("""You should execute algorithm before using its result!""" ) return self.maximum_flow class _A ( lowerCAmelCase ): def __init__( self , __lowerCAmelCase ): """simple docstring""" super().__init__(__lowerCAmelCase ) lowercase = [[0] * self.verticies_count for i in range(self.verticies_count )] lowercase = [0] * self.verticies_count lowercase = [0] * self.verticies_count def A__ ( self ): """simple docstring""" lowercase = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule lowercase = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list lowercase = 0 while i < len(__lowerCAmelCase ): lowercase = vertices_list[i] lowercase = self.heights[vertex_index] self.process_vertex(__lowerCAmelCase ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(__lowerCAmelCase ) ) lowercase = 0 else: i += 1 lowercase = 
sum(self.preflow[self.source_index] ) def A__ ( self , __lowerCAmelCase ): """simple docstring""" while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(__lowerCAmelCase , __lowerCAmelCase ) self.relabel(__lowerCAmelCase ) def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" lowercase = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def A__ ( self , __lowerCAmelCase ): """simple docstring""" lowercase = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): lowercase = self.heights[to_index] if min_height is not None: lowercase = min_height + 1 if __name__ == "__main__": __lowerCAmelCase : int =[0] __lowerCAmelCase : List[Any] =[3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] __lowerCAmelCase : Optional[int] =[[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network __lowerCAmelCase : Tuple =FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate __lowerCAmelCase : Optional[int] =flow_network.find_maximum_flow() print(F"""maximum flow is {maximum_flow}""")
32
1
MOD_ADLER = 65_521


def adler32(plain_text: str) -> int:
    """
    Compute the Adler-32 checksum of a string: two running sums,
    each taken modulo 65521, packed into a single 32-bit value.
    """
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
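# Cross-check of the pure-Python adler32 above against the C implementation in
# the standard library (zlib.adler32 operates on bytes, so the sample string is
# encoded first); both use the same seed of 1, so the results match exactly for
# ASCII input.
if __name__ == "__main__":
    import zlib

    sample = "Wikipedia"
    assert adler32(sample) == zlib.adler32(sample.encode("utf-8"))
    print(f"adler32({sample!r}) = {adler32(sample):#010x}")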
2
'''simple docstring''' import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask lowerCamelCase : List[Any] = logging.getLogger(__name__) class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def __init__(self : Optional[Any] , UpperCamelCase : Any=-1 ): '''simple docstring''' lowercase__ = label_idx def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[Split, str] ): '''simple docstring''' if isinstance(UpperCamelCase , UpperCamelCase ): lowercase__ = mode.value lowercase__ = os.path.join(UpperCamelCase , f"{mode}.txt" ) lowercase__ = 1 lowercase__ = [] with open(UpperCamelCase , encoding='''utf-8''' ) as f: lowercase__ = [] lowercase__ = [] for line in f: if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=UpperCamelCase , labels=UpperCamelCase ) ) guid_index += 1 lowercase__ = [] lowercase__ = [] else: lowercase__ = line.split(''' ''' ) words.append(splits[0] ) if len(UpperCamelCase ) > 1: labels.append(splits[self.label_idx].replace('''\n''' , '''''' ) ) else: # Examples could have no label for mode = "test" labels.append('''O''' ) if words: examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=UpperCamelCase , labels=UpperCamelCase ) ) return examples def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : TextIO , UpperCamelCase : TextIO , UpperCamelCase : List ): '''simple docstring''' lowercase__ = 0 for line in test_input_reader: if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n": writer.write(UpperCamelCase ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: lowercase__ = line.split()[0] + ''' ''' + preds_list[example_id].pop(0 ) + '''\n''' writer.write(UpperCamelCase ) else: logger.warning('''Maximum sequence length exceeded: No prediction for \'%s\'.''' , line.split()[0] ) def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' if path: with open(UpperCamelCase , '''r''' ) as f: lowercase__ = f.read().splitlines() if "O" not in labels: lowercase__ = ['''O'''] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def __init__(self : List[Any] ): '''simple docstring''' super().__init__(label_idx=-2 ) def UpperCamelCase__ (self : List[Any] , UpperCamelCase : str ): '''simple docstring''' if path: with open(UpperCamelCase , '''r''' ) as f: lowercase__ = f.read().splitlines() if "O" not in labels: lowercase__ = ['''O'''] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def UpperCamelCase__ (self : Tuple , UpperCamelCase : int , UpperCamelCase : Union[Split, str] ): '''simple docstring''' if isinstance(UpperCamelCase , UpperCamelCase ): lowercase__ = mode.value lowercase__ = os.path.join(UpperCamelCase , f"{mode}.txt" ) lowercase__ = 1 lowercase__ = [] with open(UpperCamelCase , encoding='''utf-8''' ) as f: for sentence in parse_incr(UpperCamelCase ): lowercase__ = [] lowercase__ = [] for token in sentence: words.append(token['''form'''] ) labels.append(token['''upos'''] ) assert 
len(UpperCamelCase ) == len(UpperCamelCase ) if words: examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=UpperCamelCase , labels=UpperCamelCase ) ) guid_index += 1 return examples def UpperCamelCase__ (self : Tuple , UpperCamelCase : TextIO , UpperCamelCase : TextIO , UpperCamelCase : List ): '''simple docstring''' lowercase__ = 0 for sentence in parse_incr(UpperCamelCase ): lowercase__ = preds_list[example_id] lowercase__ = '''''' for token in sentence: out += f"{token['form']} ({token['upos']}|{s_p.pop(0 )}) " out += "\n" writer.write(UpperCamelCase ) example_id += 1 def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' if path: with open(UpperCamelCase , '''r''' ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
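# For context: the first reader above (used for NER, and via the label_idx=-2
# subclass for chunking) expects CoNLL-style text with one whitespace-separated
# token per line, the label in column `label_idx`, and blank lines between
# sentences, e.g. (illustrative sample):
#
#     EU      B-ORG
#     rejects O
#     German  B-MISC
#
# The last class instead parses CoNLL-U files with `parse_incr` and uses each
# token's `upos` tag as its label.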
2
1
"""simple docstring""" import copy from typing import Dict, List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING _lowerCAmelCase : Any = { """facebook/mask2former-swin-small-coco-instance""": ( """https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json""" ) # See all Mask2Former models at https://huggingface.co/models?filter=mask2former } _lowerCAmelCase : Tuple = logging.get_logger(__name__) class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ ='''mask2former''' SCREAMING_SNAKE_CASE_ =['''swin'''] SCREAMING_SNAKE_CASE_ ={'''hidden_size''': '''hidden_dim'''} def __init__( self : int , snake_case__ : Optional[Dict] = None , snake_case__ : int = 2_5_6 , snake_case__ : int = 2_5_6 , snake_case__ : int = 2_5_6 , snake_case__ : int = 1_0_2_4 , snake_case__ : str = "relu" , snake_case__ : int = 6 , snake_case__ : int = 1_0 , snake_case__ : int = 8 , snake_case__ : float = 0.0 , snake_case__ : int = 2_0_4_8 , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : int = 4 , snake_case__ : int = 2_5_5 , snake_case__ : int = 1_0_0 , snake_case__ : float = 0.1 , snake_case__ : float = 2.0 , snake_case__ : float = 5.0 , snake_case__ : float = 5.0 , snake_case__ : int = 1_2_5_4_4 , snake_case__ : float = 3.0 , snake_case__ : float = 0.75 , snake_case__ : float = 0.02 , snake_case__ : float = 1.0 , snake_case__ : bool = True , snake_case__ : List[int] = [4, 8, 1_6, 3_2] , snake_case__ : bool = None , **snake_case__ : int , ): '''simple docstring''' if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone." ) UpperCAmelCase__ : Dict = CONFIG_MAPPING["swin"]( image_size=2_2_4 , in_channels=3 , patch_size=4 , embed_dim=9_6 , depths=[2, 2, 1_8, 2] , num_heads=[3, 6, 1_2, 2_4] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=snake_case__ , out_features=["stage1", "stage2", "stage3", "stage4"] , ) if isinstance(snake_case__ , snake_case__ ): UpperCAmelCase__ : Union[str, Any] = backbone_config.pop("model_type" ) UpperCAmelCase__ : Dict = CONFIG_MAPPING[backbone_model_type] UpperCAmelCase__ : Optional[int] = config_class.from_dict(snake_case__ ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. 
' f'Supported model types: {",".join(self.backbones_supported )}' ) UpperCAmelCase__ : List[str] = backbone_config UpperCAmelCase__ : Union[str, Any] = feature_size UpperCAmelCase__ : Dict = mask_feature_size UpperCAmelCase__ : Optional[int] = hidden_dim UpperCAmelCase__ : List[Any] = encoder_feedforward_dim UpperCAmelCase__ : Optional[int] = activation_function UpperCAmelCase__ : int = encoder_layers UpperCAmelCase__ : Dict = decoder_layers UpperCAmelCase__ : int = num_attention_heads UpperCAmelCase__ : List[str] = dropout UpperCAmelCase__ : Optional[int] = dim_feedforward UpperCAmelCase__ : Tuple = pre_norm UpperCAmelCase__ : Any = enforce_input_projection UpperCAmelCase__ : Union[str, Any] = common_stride UpperCAmelCase__ : List[str] = ignore_value UpperCAmelCase__ : Any = num_queries UpperCAmelCase__ : Any = no_object_weight UpperCAmelCase__ : Optional[int] = class_weight UpperCAmelCase__ : List[Any] = mask_weight UpperCAmelCase__ : List[Any] = dice_weight UpperCAmelCase__ : Optional[Any] = train_num_points UpperCAmelCase__ : Dict = oversample_ratio UpperCAmelCase__ : Optional[int] = importance_sample_ratio UpperCAmelCase__ : Any = init_std UpperCAmelCase__ : str = init_xavier_std UpperCAmelCase__ : List[str] = use_auxiliary_loss UpperCAmelCase__ : Optional[int] = feature_strides UpperCAmelCase__ : List[str] = output_auxiliary_logits UpperCAmelCase__ : Tuple = decoder_layers super().__init__(**snake_case__ ) @classmethod def __a ( cls : str , snake_case__ : PretrainedConfig , **snake_case__ : Any ): '''simple docstring''' return cls( backbone_config=snake_case__ , **snake_case__ , ) def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Any = copy.deepcopy(self.__dict__ ) UpperCAmelCase__ : int = self.backbone_config.to_dict() UpperCAmelCase__ : Optional[Any] = self.__class__.model_type return output
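# Minimal usage sketch, assuming the class above is the Mask2FormerConfig that
# transformers exposes (instantiating it with no arguments builds the default
# Swin backbone config noted in __init__):
#
#     from transformers import Mask2FormerConfig, Mask2FormerModel
#
#     config = Mask2FormerConfig()
#     model = Mask2FormerModel(config)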
363
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Any )-> Any: '''simple docstring''' UpperCAmelCase__ : List[str] = [1] for i in range(2 , snake_case ): factorials.append(factorials[-1] * i ) assert 0 <= k < factorials[-1] * n, "k out of bounds" UpperCAmelCase__ : Union[str, Any] = [] UpperCAmelCase__ : str = list(range(snake_case ) ) # Find permutation while factorials: UpperCAmelCase__ : str = factorials.pop() UpperCAmelCase__ , UpperCAmelCase__ : int = divmod(snake_case , snake_case ) permutation.append(elements[number] ) elements.remove(elements[number] ) permutation.append(elements[0] ) return permutation if __name__ == "__main__": import doctest doctest.testmod()
298
0
import d4rl  # noqa
import gym
import tqdm

from diffusers.experimental import ValueGuidedRLPipeline


config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}


if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
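# Note: the `config` dict above is defined but not consumed by the script as
# written; the pipeline call only passes `planning_horizon`. A variant that
# forwards one of the tuning knobs explicitly would look like (illustrative):
#
#     denorm_actions = pipeline(obs, planning_horizon=config["horizon"])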
48
import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors SCREAMING_SNAKE_CASE__ : Dict = logging.getLogger(__name__) class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Optional[int] = """sequence-classification""" def __init__( self , UpperCamelCase__ ) -> List[Any]: if type(UpperCamelCase__ ) == dict: lowerCamelCase : int = Namespace(**UpperCamelCase__ ) lowerCamelCase : str = glue_output_modes[hparams.task] lowerCamelCase : int = glue_tasks_num_labels[hparams.task] super().__init__(UpperCamelCase__ , UpperCamelCase__ , self.mode ) def _lowercase ( self , **UpperCamelCase__ ) -> Tuple: return self.model(**UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple: lowerCamelCase : Union[str, Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: lowerCamelCase : List[str] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None lowerCamelCase : Optional[int] = self(**UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = outputs[0] lowerCamelCase : str = self.trainer.lr_schedulers[0]["scheduler"] lowerCamelCase : Optional[int] = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def _lowercase ( self ) -> str: lowerCamelCase : Any = self.hparams lowerCamelCase : Union[str, Any] = processors[args.task]() lowerCamelCase : Optional[int] = processor.get_labels() for mode in ["train", "dev"]: lowerCamelCase : Optional[Any] = self._feature_file(UpperCamelCase__ ) if os.path.exists(UpperCamelCase__ ) and not args.overwrite_cache: logger.info("Loading features from cached file %s" , UpperCamelCase__ ) else: logger.info("Creating features from dataset file at %s" , args.data_dir ) lowerCamelCase : List[str] = ( processor.get_dev_examples(args.data_dir ) if mode == "dev" else processor.get_train_examples(args.data_dir ) ) lowerCamelCase : Dict = convert_examples_to_features( UpperCamelCase__ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info("Saving features into cached file %s" , UpperCamelCase__ ) torch.save(UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False ) -> DataLoader: lowerCamelCase : str = "dev" if mode == "test" else mode lowerCamelCase : int = self._feature_file(UpperCamelCase__ ) logger.info("Loading features from cached file %s" , UpperCamelCase__ ) lowerCamelCase : str = torch.load(UpperCamelCase__ ) lowerCamelCase : List[str] = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) lowerCamelCase : str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) lowerCamelCase : List[str] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) if self.hparams.glue_output_mode == "classification": lowerCamelCase : Any = torch.tensor([f.label for f in features] , dtype=torch.long ) elif self.hparams.glue_output_mode 
== "regression": lowerCamelCase : Union[str, Any] = torch.tensor([f.label for f in features] , dtype=torch.float ) return DataLoader( TensorDataset(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , batch_size=UpperCamelCase__ , shuffle=UpperCamelCase__ , ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]: lowerCamelCase : Dict = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: lowerCamelCase : Tuple = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None lowerCamelCase : Dict = self(**UpperCamelCase__ ) lowerCamelCase , lowerCamelCase : Any = outputs[:2] lowerCamelCase : Union[str, Any] = logits.detach().cpu().numpy() lowerCamelCase : Optional[Any] = inputs["labels"].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def _lowercase ( self , UpperCamelCase__ ) -> tuple: lowerCamelCase : Union[str, Any] = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item() lowerCamelCase : Optional[int] = np.concatenate([x["pred"] for x in outputs] , axis=0 ) if self.hparams.glue_output_mode == "classification": lowerCamelCase : Union[str, Any] = np.argmax(UpperCamelCase__ , axis=1 ) elif self.hparams.glue_output_mode == "regression": lowerCamelCase : str = np.squeeze(UpperCamelCase__ ) lowerCamelCase : List[Any] = np.concatenate([x["target"] for x in outputs] , axis=0 ) lowerCamelCase : List[str] = [[] for _ in range(out_label_ids.shape[0] )] lowerCamelCase : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )] lowerCamelCase : Dict = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , UpperCamelCase__ , UpperCamelCase__ )} lowerCamelCase : List[str] = dict(results.items() ) lowerCamelCase : Optional[int] = results return ret, preds_list, out_label_list def _lowercase ( self , UpperCamelCase__ ) -> dict: lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = self._eval_end(UpperCamelCase__ ) lowerCamelCase : str = ret["log"] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def _lowercase ( self , UpperCamelCase__ ) -> dict: lowerCamelCase , lowerCamelCase , lowerCamelCase : str = self._eval_end(UpperCamelCase__ ) lowerCamelCase : str = ret["log"] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def _lowercase ( UpperCamelCase__ , UpperCamelCase__ ) -> int: BaseTransformer.add_model_specific_args(UpperCamelCase__ , UpperCamelCase__ ) parser.add_argument( "--max_seq_length" , default=128 , type=UpperCamelCase__ , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." 
) , ) parser.add_argument( "--task" , default="" , type=UpperCamelCase__ , required=UpperCamelCase__ , help="The GLUE task to run" , ) parser.add_argument( "--gpus" , default=0 , type=UpperCamelCase__ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , ) parser.add_argument( "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" ) return parser def A ( ) -> int: lowerCamelCase : int = argparse.ArgumentParser() add_generic_args(_SCREAMING_SNAKE_CASE ,os.getcwd() ) lowerCamelCase : str = GLUETransformer.add_model_specific_args(_SCREAMING_SNAKE_CASE ,os.getcwd() ) lowerCamelCase : str = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: lowerCamelCase : int = os.path.join( "./results" ,f'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' ,) os.makedirs(args.output_dir ) lowerCamelCase : int = GLUETransformer(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Dict = generic_train(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # Optionally, predict on dev set and write to output_dir if args.do_predict: lowerCamelCase : Optional[int] = sorted(glob.glob(os.path.join(args.output_dir ,"checkpoint-epoch=*.ckpt" ) ,recursive=_SCREAMING_SNAKE_CASE ) ) lowerCamelCase : Tuple = model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
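# Typical invocation (the script name, data path and flags registered by the
# shared lightning_base argument helpers are assumed, not defined in this file):
#
#     python run_pl_glue.py \
#         --model_name_or_path bert-base-cased \
#         --task mrpc \
#         --data_dir ./glue_data/MRPC \
#         --max_seq_length 128 \
#         --do_predict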
48
1
import importlib.metadata
from typing import Union

from packaging.version import Version, parse

from .constants import STR_OPERATION_TO_FUNC


torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """
    Compare a library version (or an already-parsed Version) against a
    requirement, using a comparison operator given as a string.
    """
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    """Compare the installed torch version against a reference version."""
    return compare_versions(torch_version, operation, version)
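# For reference, STR_OPERATION_TO_FUNC (imported from .constants above, which
# is not shown in this file) is presumably a mapping from comparison strings to
# the corresponding functions in the `operator` module, along these lines:
#
#     import operator
#
#     STR_OPERATION_TO_FUNC = {
#         ">": operator.gt,
#         ">=": operator.ge,
#         "==": operator.eq,
#         "!=": operator.ne,
#         "<=": operator.le,
#         "<": operator.lt,
#     }
#
# Usage: is_torch_version(">=", "1.12.0") returns True on torch 1.12 or newer.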
97
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}


class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
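# Usage sketch, assuming the class above is the CamembertConfig exposed by
# transformers (any constructor argument shown above can be overridden):
#
#     from transformers import CamembertConfig, CamembertModel
#
#     config = CamembertConfig(num_hidden_layers=6)
#     model = CamembertModel(config)  # randomly initialized, camembert-shaped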
97
1
import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, Pipeline, ZeroShotClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. UpperCAmelCase__ = {"LayoutLMv2Config", "LayoutLMv3Config"} @is_pipeline_test class lowercase_ ( unittest.TestCase ): '''simple docstring''' __snake_case = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING __snake_case = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: __snake_case = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: __snake_case = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def __lowerCAmelCase ( self : int , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] ) ->Optional[int]: """simple docstring""" a = ZeroShotClassificationPipeline( model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , candidate_labels=['''polics''', '''health'''] ) return classifier, ["Who are you voting for in 2020?", "My stomach hurts."] def __lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Any , __UpperCAmelCase : List[Any] ) ->int: """simple docstring""" a = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''' ) self.assertEqual(__UpperCAmelCase , {'''sequence''': ANY(__UpperCAmelCase ), '''labels''': [ANY(__UpperCAmelCase )], '''scores''': [ANY(__UpperCAmelCase )]} ) # No kwarg a = classifier('''Who are you voting for in 2020?''' , ['''politics'''] ) self.assertEqual(__UpperCAmelCase , {'''sequence''': ANY(__UpperCAmelCase ), '''labels''': [ANY(__UpperCAmelCase )], '''scores''': [ANY(__UpperCAmelCase )]} ) a = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics'''] ) self.assertEqual(__UpperCAmelCase , {'''sequence''': ANY(__UpperCAmelCase ), '''labels''': [ANY(__UpperCAmelCase )], '''scores''': [ANY(__UpperCAmelCase )]} ) a = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''' ) self.assertEqual( __UpperCAmelCase , {'''sequence''': ANY(__UpperCAmelCase ), '''labels''': [ANY(__UpperCAmelCase ), ANY(__UpperCAmelCase )], '''scores''': [ANY(__UpperCAmelCase ), ANY(__UpperCAmelCase )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 ) a = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health'''] ) self.assertEqual( __UpperCAmelCase , {'''sequence''': ANY(__UpperCAmelCase ), '''labels''': [ANY(__UpperCAmelCase ), ANY(__UpperCAmelCase )], '''scores''': [ANY(__UpperCAmelCase ), ANY(__UpperCAmelCase )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 ) a = classifier( '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''' ) self.assertEqual(__UpperCAmelCase , {'''sequence''': ANY(__UpperCAmelCase ), '''labels''': [ANY(__UpperCAmelCase )], '''scores''': [ANY(__UpperCAmelCase )]} ) # https://github.com/huggingface/transformers/issues/13846 a = classifier(['''I am happy'''] , ['''positive''', '''negative'''] ) self.assertEqual( __UpperCAmelCase , [ {'''sequence''': ANY(__UpperCAmelCase ), '''labels''': [ANY(__UpperCAmelCase ), 
ANY(__UpperCAmelCase )], '''scores''': [ANY(__UpperCAmelCase ), ANY(__UpperCAmelCase )]} for i in range(1 ) ] , ) a = classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative'''] ) self.assertEqual( __UpperCAmelCase , [ {'''sequence''': ANY(__UpperCAmelCase ), '''labels''': [ANY(__UpperCAmelCase ), ANY(__UpperCAmelCase )], '''scores''': [ANY(__UpperCAmelCase ), ANY(__UpperCAmelCase )]} for i in range(2 ) ] , ) with self.assertRaises(__UpperCAmelCase ): classifier('''''' , candidate_labels='''politics''' ) with self.assertRaises(__UpperCAmelCase ): classifier(__UpperCAmelCase , candidate_labels='''politics''' ) with self.assertRaises(__UpperCAmelCase ): classifier('''Who are you voting for in 2020?''' , candidate_labels='''''' ) with self.assertRaises(__UpperCAmelCase ): classifier('''Who are you voting for in 2020?''' , candidate_labels=__UpperCAmelCase ) with self.assertRaises(__UpperCAmelCase ): classifier( '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , ) with self.assertRaises(__UpperCAmelCase ): classifier( '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=__UpperCAmelCase , ) self.run_entailment_id(__UpperCAmelCase ) def __lowerCAmelCase ( self : int , __UpperCAmelCase : Pipeline ) ->int: """simple docstring""" a = zero_shot_classifier.model.config a = config.labelaid a = zero_shot_classifier.entailment_id a = {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2} self.assertEqual(zero_shot_classifier.entailment_id , -1 ) a = {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) a = {'''ENTAIL''': 0, '''NON-ENTAIL''': 1} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) a = {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0} self.assertEqual(zero_shot_classifier.entailment_id , 2 ) a = original_labelaid self.assertEqual(__UpperCAmelCase , zero_shot_classifier.entailment_id ) @require_torch def __lowerCAmelCase ( self : Optional[Any] ) ->str: """simple docstring""" a = pipeline( '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , ) # There was a regression in 4.10 for this # Adding a test so we don't make the mistake again. 
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499 zero_shot_classifier( '''Who are you voting for in 2020?''' * 100 , candidate_labels=['''politics''', '''public health''', '''science'''] ) @require_torch def __lowerCAmelCase ( self : int ) ->Optional[Any]: """simple docstring""" a = pipeline( '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , ) a = zero_shot_classifier( '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , { '''sequence''': '''Who are you voting for in 2020?''', '''labels''': ['''science''', '''public health''', '''politics'''], '''scores''': [0.333, 0.333, 0.333], } , ) @require_tf def __lowerCAmelCase ( self : Optional[int] ) ->Union[str, Any]: """simple docstring""" a = pipeline( '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''tf''' , ) a = zero_shot_classifier( '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , { '''sequence''': '''Who are you voting for in 2020?''', '''labels''': ['''science''', '''public health''', '''politics'''], '''scores''': [0.333, 0.333, 0.333], } , ) @slow @require_torch def __lowerCAmelCase ( self : int ) ->List[Any]: """simple docstring""" a = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''pt''' ) a = zero_shot_classifier( '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , { '''sequence''': '''Who are you voting for in 2020?''', '''labels''': ['''politics''', '''public health''', '''science'''], '''scores''': [0.976, 0.015, 0.009], } , ) a = zero_shot_classifier( '''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks''' ''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder''' ''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based''' ''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two''' ''' machine translation tasks show these models to be superior in quality while being more parallelizable''' ''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014''' ''' English-to-German translation task, improving over the existing best results, including ensembles by''' ''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new''' ''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small''' ''' fraction of the training costs of the best models from the literature. 
We show that the Transformer''' ''' generalizes well to other tasks by applying it successfully to English constituency parsing both with''' ''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=__UpperCAmelCase , ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , { '''sequence''': ( '''The dominant sequence transduction models are based on complex recurrent or convolutional neural''' ''' networks in an encoder-decoder configuration. The best performing models also connect the''' ''' encoder and decoder through an attention mechanism. We propose a new simple network''' ''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence''' ''' and convolutions entirely. Experiments on two machine translation tasks show these models to be''' ''' superior in quality while being more parallelizable and requiring significantly less time to''' ''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,''' ''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014''' ''' English-to-French translation task, our model establishes a new single-model state-of-the-art''' ''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training''' ''' costs of the best models from the literature. We show that the Transformer generalizes well to''' ''' other tasks by applying it successfully to English constituency parsing both with large and''' ''' limited training data.''' ), '''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''], '''scores''': [0.817, 0.713, 0.018, 0.018], } , ) @slow @require_tf def __lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]: """simple docstring""" a = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''tf''' ) a = zero_shot_classifier( '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , { '''sequence''': '''Who are you voting for in 2020?''', '''labels''': ['''politics''', '''public health''', '''science'''], '''scores''': [0.976, 0.015, 0.009], } , ) a = zero_shot_classifier( '''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks''' ''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder''' ''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based''' ''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two''' ''' machine translation tasks show these models to be superior in quality while being more parallelizable''' ''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014''' ''' English-to-German translation task, improving over the existing best results, including ensembles by''' ''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new''' ''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small''' ''' fraction of the training costs of the best models from the literature. 
We show that the Transformer''' ''' generalizes well to other tasks by applying it successfully to English constituency parsing both with''' ''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=__UpperCAmelCase , ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , { '''sequence''': ( '''The dominant sequence transduction models are based on complex recurrent or convolutional neural''' ''' networks in an encoder-decoder configuration. The best performing models also connect the''' ''' encoder and decoder through an attention mechanism. We propose a new simple network''' ''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence''' ''' and convolutions entirely. Experiments on two machine translation tasks show these models to be''' ''' superior in quality while being more parallelizable and requiring significantly less time to''' ''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,''' ''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014''' ''' English-to-French translation task, our model establishes a new single-model state-of-the-art''' ''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training''' ''' costs of the best models from the literature. We show that the Transformer generalizes well to''' ''' other tasks by applying it successfully to English constituency parsing both with large and''' ''' limited training data.''' ), '''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''], '''scores''': [0.817, 0.713, 0.018, 0.018], } , )
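# For reference, outside the test harness the pipeline under test is driven
# like this (the checkpoint matches the slow tests above):
#
#     from transformers import pipeline
#
#     classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
#     classifier(
#         "Who are you voting for in 2020?",
#         candidate_labels=["politics", "public health", "science"],
#     )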
0
from math import factorial

DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")
    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item
        # or the length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
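# Worked example (standard facts about factorial digit chains): 145 is a fixed
# point, since 1! + 4! + 5! = 1 + 24 + 120 = 145, so its chain has length 1,
# while 169 sits on the 3-cycle 169 -> 363601 -> 1454 -> 169.
if __name__ == "__main__":
    assert digit_factorial_sum(145) == 145
    assert digit_factorial_sum(169) == 363601
    assert digit_factorial_sum(363601) == 1454
    assert digit_factorial_sum(1454) == 169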
0
1
"""simple docstring""" import fire from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer def _snake_case ( _snake_case : str , _snake_case : str , **_snake_case : List[Any] ): lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(_snake_case , **_snake_case ) lowerCAmelCase : Tuple = AutoModelForSeqaSeqLM.from_config(_snake_case ) model.save_pretrained(_snake_case ) AutoTokenizer.from_pretrained(_snake_case ).save_pretrained(_snake_case ) return model if __name__ == "__main__": fire.Fire(save_randomly_initialized_version)
314
"""simple docstring""" import warnings from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch from ...models import UNetaDModel from ...schedulers import RePaintScheduler from ...utils import PIL_INTERPOLATION, logging, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput snake_case__ : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name def _snake_case ( _snake_case : Union[List, PIL.Image.Image, torch.Tensor] ): warnings.warn( '''The preprocess method is deprecated and will be removed in a future version. Please''' ''' use VaeImageProcessor.preprocess instead''' , _snake_case , ) if isinstance(_snake_case , torch.Tensor ): return image elif isinstance(_snake_case , PIL.Image.Image ): lowerCAmelCase : Optional[int] = [image] if isinstance(image[0] , PIL.Image.Image ): lowerCAmelCase, lowerCAmelCase : int = image[0].size lowerCAmelCase, lowerCAmelCase : Optional[int] = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 lowerCAmelCase : Union[str, Any] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image] lowerCAmelCase : int = np.concatenate(_snake_case , axis=0 ) lowerCAmelCase : Optional[Any] = np.array(_snake_case ).astype(np.floataa ) / 255.0 lowerCAmelCase : List[Any] = image.transpose(0 , 3 , 1 , 2 ) lowerCAmelCase : List[str] = 2.0 * image - 1.0 lowerCAmelCase : List[Any] = torch.from_numpy(_snake_case ) elif isinstance(image[0] , torch.Tensor ): lowerCAmelCase : Any = torch.cat(_snake_case , dim=0 ) return image def _snake_case ( _snake_case : Union[List, PIL.Image.Image, torch.Tensor] ): if isinstance(_snake_case , torch.Tensor ): return mask elif isinstance(_snake_case , PIL.Image.Image ): lowerCAmelCase : str = [mask] if isinstance(mask[0] , PIL.Image.Image ): lowerCAmelCase, lowerCAmelCase : int = mask[0].size lowerCAmelCase, lowerCAmelCase : Dict = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 lowerCAmelCase : List[str] = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask] lowerCAmelCase : Optional[int] = np.concatenate(_snake_case , axis=0 ) lowerCAmelCase : Dict = mask.astype(np.floataa ) / 255.0 lowerCAmelCase : List[str] = 0 lowerCAmelCase : Optional[int] = 1 lowerCAmelCase : List[Any] = torch.from_numpy(_snake_case ) elif isinstance(mask[0] , torch.Tensor ): lowerCAmelCase : Optional[int] = torch.cat(_snake_case , dim=0 ) return mask class snake_case_( a__ ): __UpperCamelCase = 42 __UpperCamelCase = 42 def __init__( self : List[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any] ): super().__init__() self.register_modules(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ ) @torch.no_grad() def __call__( self : Union[str, Any] , UpperCamelCase_ : Union[torch.Tensor, PIL.Image.Image] , UpperCamelCase_ : Union[torch.Tensor, PIL.Image.Image] , UpperCamelCase_ : int = 2_5_0 , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : int = 1_0 , UpperCamelCase_ : int = 1_0 , UpperCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_ : Optional[str] = "pil" , UpperCamelCase_ : bool = True , ): lowerCAmelCase : Optional[Any] = image lowerCAmelCase : Tuple = _preprocess_image(UpperCamelCase_ ) lowerCAmelCase : int = original_image.to(device=self.device , dtype=self.unet.dtype ) lowerCAmelCase : Optional[Any] = _preprocess_mask(UpperCamelCase_ ) lowerCAmelCase : str = mask_image.to(device=self.device , 
dtype=self.unet.dtype ) lowerCAmelCase : Union[str, Any] = original_image.shape[0] # sample gaussian noise to begin the loop if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(UpperCamelCase_ )}, but requested an effective batch''' F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) lowerCAmelCase : Union[str, Any] = original_image.shape lowerCAmelCase : str = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.device ) lowerCAmelCase : Optional[int] = eta lowerCAmelCase : List[str] = self.scheduler.timesteps[0] + 1 lowerCAmelCase : List[str] = generator[0] if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else generator for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): if t < t_last: # predict the noise residual lowerCAmelCase : Union[str, Any] = self.unet(UpperCamelCase_ , UpperCamelCase_ ).sample # compute previous image: x_t -> x_t-1 lowerCAmelCase : str = self.scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample else: # compute the reverse: x_t-1 -> x_t lowerCAmelCase : Optional[Any] = self.scheduler.undo_step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : List[Any] = t lowerCAmelCase : int = (image / 2 + 0.5).clamp(0 , 1 ) lowerCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowerCAmelCase : Tuple = self.numpy_to_pil(UpperCamelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCamelCase_ )
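# Usage sketch, assuming the class above is diffusers' RePaintPipeline (the
# checkpoint name follows the RePaint documentation and is illustrative here):
#
#     from diffusers import RePaintPipeline, RePaintScheduler
#
#     scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
#     pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
#     output = pipe(image=original_image, mask_image=mask_image, num_inference_steps=250)
#     inpainted = output.images[0]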
314
1
from __future__ import annotations

import pandas as pd


def calculate_waitingtime(
    arrival_time: list[int], burst_time: list[int], no_of_processes: int
) -> list[int]:
    """
    Calculate the waiting time of each process under shortest-remaining-time-
    first (preemptive SJF) scheduling.
    """
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999_999_999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999_999_999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            waiting_time[short] = finish_time - arrival_time[short] - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time


def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    """Turnaround time is the sum of burst time and waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(
    waiting_time: list[int], turn_around_time: list[int], no_of_processes: int
) -> None:
    """Print the average waiting and turnaround times."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)


if __name__ == "__main__":
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    turn_around_time = calculate_turnaroundtime(burst_time, no_of_processes, waiting_time)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
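# A non-interactive sanity check (example data, not part of the original
# script): three processes arriving at t = 0, 1, 2 with burst times 3, 1, 2
# under SRTF wait 1, 0 and 2 time units respectively.
#
#     >>> calculate_waitingtime([0, 1, 2], [3, 1, 2], 3)
#     [1, 0, 2]
#     >>> calculate_turnaroundtime([3, 1, 2], 3, [1, 0, 2])
#     [4, 1, 4]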
38
"""simple docstring""" lowercase__ = { """meter""": """m""", """kilometer""": """km""", """megametre""": """Mm""", """gigametre""": """Gm""", """terametre""": """Tm""", """petametre""": """Pm""", """exametre""": """Em""", """zettametre""": """Zm""", """yottametre""": """Ym""", } # Exponent of the factor(meter) lowercase__ = { """m""": 0, """km""": 3, """Mm""": 6, """Gm""": 9, """Tm""": 12, """Pm""": 15, """Em""": 18, """Zm""": 21, """Ym""": 24, } def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): _lowerCamelCase : List[Any] = from_type.lower().strip('s' ) _lowerCamelCase : List[Any] = to_type.lower().strip('s' ) _lowerCamelCase : Optional[int] = UNIT_SYMBOL.get(lowercase__ , lowercase__ ) _lowerCamelCase : Any = UNIT_SYMBOL.get(lowercase__ , lowercase__ ) if from_sanitized not in METRIC_CONVERSION: _lowerCamelCase : Tuple = ( f'''Invalid \'from_type\' value: {from_type!r}.\n''' f'''Conversion abbreviations are: {', '.join(lowercase__ )}''' ) raise ValueError(lowercase__ ) if to_sanitized not in METRIC_CONVERSION: _lowerCamelCase : Any = ( f'''Invalid \'to_type\' value: {to_type!r}.\n''' f'''Conversion abbreviations are: {', '.join(lowercase__ )}''' ) raise ValueError(lowercase__ ) _lowerCamelCase : List[Any] = METRIC_CONVERSION[from_sanitized] _lowerCamelCase : int = METRIC_CONVERSION[to_sanitized] _lowerCamelCase : List[str] = 1 if from_exponent > to_exponent: _lowerCamelCase : List[str] = from_exponent - to_exponent else: _lowerCamelCase : List[Any] = -(to_exponent - from_exponent) return value * pow(10 , lowercase__ ) if __name__ == "__main__": from doctest import testmod testmod()
96
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}


class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
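# Usage sketch, assuming the class above is transformers' Swin2SRConfig:
#
#     from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution
#
#     config = Swin2SRConfig(upscale=4)  # 4x super-resolution head
#     model = Swin2SRForImageSuperResolution(config)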
356
'''simple docstring''' import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class _A ( __lowercase , unittest.TestCase ): lowercase__: List[Any] = CanineTokenizer lowercase__: Optional[int] = False def lowercase__ ( self : Any ) -> Any: """simple docstring""" super().setUp() __snake_case : Dict = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowercase__ ( self : Dict ) -> Optional[Any]: """simple docstring""" return CanineTokenizer.from_pretrained("""google/canine-s""" ) def lowercase__ ( self : str , **__magic_name__ : List[Any] ) -> CanineTokenizer: """simple docstring""" __snake_case : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ ) __snake_case : Optional[Any] = 10_24 return tokenizer @require_torch def lowercase__ ( self : Tuple ) -> int: """simple docstring""" __snake_case : Optional[Any] = self.canine_tokenizer __snake_case : List[str] = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""] # fmt: off __snake_case : Dict = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0] # fmt: on __snake_case : str = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" ) self.assertIsInstance(__magic_name__ , __magic_name__ ) __snake_case : Union[str, Any] = list(batch.input_ids.numpy()[0] ) self.assertListEqual(__magic_name__ , __magic_name__ ) self.assertEqual((2, 39) , batch.input_ids.shape ) self.assertEqual((2, 39) , batch.attention_mask.shape ) @require_torch def lowercase__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" __snake_case : Any = self.canine_tokenizer __snake_case : List[Any] = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""] __snake_case : Tuple = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" ) # check if input_ids, attention_mask and token_type_ids are returned self.assertIn("""input_ids""" , __magic_name__ ) self.assertIn("""attention_mask""" , __magic_name__ ) self.assertIn("""token_type_ids""" , __magic_name__ ) @require_torch def lowercase__ ( self : int ) -> List[str]: """simple docstring""" __snake_case : Dict = self.canine_tokenizer __snake_case : Optional[Any] = [ """What's the weater?""", """It's about 25 degrees.""", ] __snake_case : Any = tokenizer( text_target=__magic_name__ , max_length=32 , padding="""max_length""" , truncation=__magic_name__ , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) def lowercase__ ( self : Tuple ) -> int: """simple docstring""" __snake_case : List[Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test __snake_case : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc __snake_case : Dict = tempfile.mkdtemp() __snake_case : str = """ He is very happy, 
UNwant\u00E9d,running""" __snake_case : Dict = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) tokenizer.save_pretrained(__magic_name__ ) __snake_case : str = tokenizer.__class__.from_pretrained(__magic_name__ ) __snake_case : Dict = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) shutil.rmtree(__magic_name__ ) __snake_case : Tuple = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc __snake_case : Optional[Any] = tempfile.mkdtemp() __snake_case : List[str] = """ He is very happy, UNwant\u00E9d,running""" __snake_case : Optional[int] = tokenizer.additional_special_tokens # We can add a new special token for Canine as follows: __snake_case : List[Any] = chr(0xE007 ) additional_special_tokens.append(__magic_name__ ) tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} ) __snake_case : List[str] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) tokenizer.save_pretrained(__magic_name__ ) __snake_case : Union[str, Any] = tokenizer.__class__.from_pretrained(__magic_name__ ) __snake_case : int = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) self.assertIn(__magic_name__ , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) __snake_case : str = tokenizer.__class__.from_pretrained(__magic_name__ , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(__magic_name__ ) def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __snake_case : Tuple = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __snake_case , __snake_case : Any = self.get_clean_sequence(__magic_name__ ) # a special token for Canine can be defined as follows: __snake_case : Tuple = 0xE005 __snake_case : Tuple = chr(__magic_name__ ) tokenizer.add_special_tokens({"""cls_token""": special_token} ) __snake_case : Optional[Any] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertEqual(len(__magic_name__ ) , 1 ) __snake_case : Any = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__magic_name__ ) __snake_case : Tuple = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) __snake_case : Dict = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) __snake_case : int = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertEqual(__magic_name__ , input_encoded + special_token_id ) __snake_case : Tuple = tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ ) self.assertTrue(special_token not in decoded ) def lowercase__ ( self : List[str] ) -> Tuple: """simple docstring""" __snake_case : Any = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __snake_case : Dict = chr(0xE005 ) __snake_case : str = chr(0xE006 ) # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. 
(in tokenization_utils.py) tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__magic_name__ ) # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`, # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py) tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} ) __snake_case : Tuple = tokenizer.tokenize(__magic_name__ ) __snake_case : Any = tokenizer.tokenize(__magic_name__ ) self.assertEqual(len(__magic_name__ ) , 1 ) self.assertEqual(len(__magic_name__ ) , 1 ) self.assertEqual(token_a[0] , __magic_name__ ) self.assertEqual(token_a[0] , __magic_name__ ) @require_tokenizers def lowercase__ ( self : Optional[int] ) -> Tuple: """simple docstring""" __snake_case : str = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # a special token for Canine can be defined as follows: __snake_case : Optional[Any] = 0xE006 __snake_case : List[str] = chr(__magic_name__ ) __snake_case : Optional[Any] = AddedToken(__magic_name__ , lstrip=__magic_name__ ) tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} ) with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(__magic_name__ ) tokenizer.from_pretrained(__magic_name__ ) def lowercase__ ( self : Any ) -> int: """simple docstring""" __snake_case : Union[str, Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(__magic_name__ ) with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file: __snake_case : Any = json.load(__magic_name__ ) with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file: __snake_case : Tuple = json.load(__magic_name__ ) # a special token for Canine can be defined as follows: __snake_case : Tuple = 0xE006 __snake_case : int = chr(__magic_name__ ) __snake_case : List[Any] = [new_token_a] __snake_case : Union[str, Any] = [new_token_a] with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(__magic_name__ , __magic_name__ ) with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(__magic_name__ , __magic_name__ ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files __snake_case : Tuple = tokenizer_class.from_pretrained(__magic_name__ , extra_ids=0 ) self.assertIn(__magic_name__ , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , ) __snake_case : Any = 0xE007 __snake_case : Any = chr(__magic_name__ ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained __snake_case : Dict = [AddedToken(__magic_name__ , lstrip=__magic_name__ )] __snake_case : Union[str, Any] = tokenizer_class.from_pretrained( __magic_name__ , additional_special_tokens=__magic_name__ , extra_ids=0 ) self.assertIn(__magic_name__ , tokenizer.additional_special_tokens ) # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) ) @require_tokenizers def lowercase__ ( self : Optional[int] ) -> List[str]: """simple docstring""" __snake_case : int = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __snake_case : List[str] = """hello world""" if self.space_between_special_tokens: __snake_case : Union[str, Any] = """[CLS] hello world [SEP]""" else: __snake_case : List[Any] = input __snake_case : int = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) __snake_case : Any = tokenizer.decode(__magic_name__ , spaces_between_special_tokens=self.space_between_special_tokens ) self.assertIn(__magic_name__ , [output, output.lower()] ) def lowercase__ ( self : Tuple ) -> Tuple: """simple docstring""" __snake_case : Optional[Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __snake_case : str = [ """bos_token""", """eos_token""", """unk_token""", """sep_token""", """pad_token""", """cls_token""", """mask_token""", ] __snake_case : Dict = """a""" __snake_case : Tuple = ord(__magic_name__ ) for attr in attributes_list: setattr(__magic_name__ , attr + """_id""" , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ ) setattr(__magic_name__ , attr + """_id""" , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ ) setattr(__magic_name__ , """additional_special_tokens_ids""" , [] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [] ) __snake_case : Dict = 0xE006 __snake_case : str = chr(__magic_name__ ) setattr(__magic_name__ , """additional_special_tokens_ids""" , [additional_special_token_id] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [additional_special_token] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [additional_special_token_id] ) def lowercase__ ( self : Dict ) -> 
int: """simple docstring""" pass def lowercase__ ( self : str ) -> Tuple: """simple docstring""" pass def lowercase__ ( self : Tuple ) -> List[str]: """simple docstring""" pass def lowercase__ ( self : Optional[int] ) -> Any: """simple docstring""" pass def lowercase__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" pass def lowercase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" pass def lowercase__ ( self : List[Any] ) -> Any: """simple docstring""" pass def lowercase__ ( self : Dict ) -> List[str]: """simple docstring""" pass
13
0
from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __lowerCamelCase : Optional[int] = {"""configuration_mmbt""": ["""MMBTConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : List[Any] = ["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys __lowerCamelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
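The MMBT init above declares its public names up front and defers the actual imports, the lazy-loading pattern that `_LazyModule` implements across the library. A self-contained sketch of the PEP 562 mechanism it builds on, using the stdlib json module as a stand-in import target (the helper name is invented for the sketch):

import importlib
import types


def make_lazy(name: str, import_structure: dict) -> types.ModuleType:
    # Create an empty module whose failed attribute lookups trigger the real import.
    mod = types.ModuleType(name)

    def __getattr__(attr):
        for submodule, symbols in import_structure.items():
            if attr in symbols:
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(attr)

    mod.__getattr__ = __getattr__  # PEP 562: consulted when normal lookup fails
    return mod


json_facade = make_lazy("json_facade", {"json": ["dumps", "loads"]})
print(json_facade.dumps({"lazy": True}))  # the json import happens only here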
52
from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = 42 snake_case_ = 42 snake_case_ = None class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = 2 @register_to_config def __init__( self : str ,A : float = 0.02 ,A : float = 1_00 ,A : float = 1.0_07 ,A : float = 80 ,A : float = 0.05 ,A : float = 50 ,): # standard deviation of the initial noise distribution __A = sigma_max # setable values __A = None __A = None __A = None # sigma(t_i) def UpperCamelCase_ ( self : str ,A : torch.FloatTensor ,A : Optional[int] = None ): return sample def UpperCamelCase_ ( self : Dict ,A : int ,A : Union[str, torch.device] = None ): __A = num_inference_steps __A = np.arange(0 ,self.num_inference_steps )[::-1].copy() __A = torch.from_numpy(A ).to(A ) __A = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] __A = torch.tensor(A ,dtype=torch.floataa ,device=A ) def UpperCamelCase_ ( self : Union[str, Any] ,A : torch.FloatTensor ,A : float ,A : Optional[torch.Generator] = None ): if self.config.s_min <= sigma <= self.config.s_max: __A = min(self.config.s_churn / self.num_inference_steps ,2**0.5 - 1 ) else: __A = 0 # sample eps ~ N(0, S_noise^2 * I) __A = self.config.s_noise * randn_tensor(sample.shape ,generator=A ).to(sample.device ) __A = sigma + gamma * sigma __A = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def UpperCamelCase_ ( self : Dict ,A : torch.FloatTensor ,A : float ,A : float ,A : torch.FloatTensor ,A : bool = True ,): __A = sample_hat + sigma_hat * model_output __A = (sample_hat - pred_original_sample) / sigma_hat __A = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=A ,derivative=A ,pred_original_sample=A ) def UpperCamelCase_ ( self : Optional[int] ,A : torch.FloatTensor ,A : float ,A : float ,A : torch.FloatTensor ,A : torch.FloatTensor ,A : torch.FloatTensor ,A : bool = True ,): __A = sample_prev + sigma_prev * model_output __A = (sample_prev - pred_original_sample) / sigma_prev __A = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=A ,derivative=A ,pred_original_sample=A ) def UpperCamelCase_ ( self : List[Any] ,A : Dict ,A : List[str] ,A : str ): raise NotImplementedError()
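The timestep setup in the scheduler above builds a geometric ramp between sigma_max**2 and sigma_min**2. The same schedule reproduced with plain numpy (0.02 and 100 are the defaults visible in __init__; 50 steps is an arbitrary choice for the sketch):

import numpy as np

sigma_min, sigma_max = 0.02, 100.0
num_inference_steps = 50
timesteps = np.arange(0, num_inference_steps)[::-1]
schedule = sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (timesteps / (num_inference_steps - 1))
print(schedule.max(), schedule.min())  # 10000.0 (sigma_max**2) down to 0.0004 (sigma_min**2)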
15
0
'''simple docstring'''
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    # Sigmoid, or its derivative expressed in terms of the sigmoid output.
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    weight = float(2 * (random.randint(1, 100)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
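The deriv branch above relies on the identity sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)), which is why the derivative is computed from the layer output rather than the input. A quick finite-difference check:

import math

x = 0.3
s = 1 / (1 + math.exp(-x))
finite_diff = (1 / (1 + math.exp(-(x + 1e-6))) - s) / 1e-6
assert abs(finite_diff - s * (1 - s)) < 1e-5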
354
'''simple docstring''' import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) lowerCAmelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class lowerCamelCase : snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(__lowerCAmelCase )} ) snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} ) snake_case_ = field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) snake_case_ = field( default=128 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , ) snake_case_ = field( default=64 , metadata={ '''help''': ( '''The maximum number of tokens for the question. Questions longer than this will ''' '''be truncated to this length.''' ) } , ) snake_case_ = field( default=30 , metadata={ '''help''': ( '''The maximum length of an answer that can be generated. This is needed because the start ''' '''and end predictions are not conditioned on one another.''' ) } , ) snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} ) snake_case_ = field( default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} ) snake_case_ = field( default=20 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} ) snake_case_ = field( default=0 , metadata={ '''help''': ( '''language id of input for language-specific xlm models (see''' ''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)''' ) } , ) snake_case_ = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} ) class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''train''' snake_case_ = '''dev''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = 42 snake_case_ = 42 snake_case_ = 42 snake_case_ = 42 def __init__( self, lowercase_, lowercase_, lowercase_ = None, lowercase_ = Split.train, lowercase_ = False, lowercase_ = None, lowercase_ = "pt", ) -> int: snake_case = args snake_case = is_language_sensitive snake_case = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(lowercase_, lowercase_ ): try: snake_case = Split[mode] except KeyError: raise KeyError('mode is not a valid split name' ) snake_case = mode # Load data features from cache or dataset file snake_case = 'v2' if args.version_2_with_negative else 'v1' snake_case = os.path.join( cache_dir if cache_dir is not None else args.data_dir, 
F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. snake_case = cached_features_file + '.lock' with FileLock(lowercase_ ): if os.path.exists(lowercase_ ) and not args.overwrite_cache: snake_case = time.time() snake_case = torch.load(lowercase_ ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. snake_case = self.old_features['features'] snake_case = self.old_features.get('dataset', lowercase_ ) snake_case = self.old_features.get('examples', lowercase_ ) logger.info( F'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( F'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in''' ' future run' ) else: if mode == Split.dev: snake_case = self.processor.get_dev_examples(args.data_dir ) else: snake_case = self.processor.get_train_examples(args.data_dir ) snake_case , snake_case = squad_convert_examples_to_features( examples=self.examples, tokenizer=lowercase_, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=lowercase_, ) snake_case = time.time() torch.save( {'features': self.features, 'dataset': self.dataset, 'examples': self.examples}, lowercase_, ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' ) def __len__( self ) -> Tuple: return len(self.features ) def __getitem__( self, lowercase_ ) -> Dict[str, torch.Tensor]: # Convert to Tensors and build dataset snake_case = self.features[i] snake_case = torch.tensor(feature.input_ids, dtype=torch.long ) snake_case = torch.tensor(feature.attention_mask, dtype=torch.long ) snake_case = torch.tensor(feature.token_type_ids, dtype=torch.long ) snake_case = torch.tensor(feature.cls_index, dtype=torch.long ) snake_case = torch.tensor(feature.p_mask, dtype=torch.float ) snake_case = torch.tensor(feature.is_impossible, dtype=torch.float ) snake_case = { 'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({'cls_index': cls_index, 'p_mask': p_mask} ) if self.args.version_2_with_negative: inputs.update({'is_impossible': is_impossible} ) if self.is_language_sensitive: inputs.update({'langs': (torch.ones(input_ids.shape, dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: snake_case = torch.tensor(feature.start_position, dtype=torch.long ) snake_case = torch.tensor(feature.end_position, dtype=torch.long ) inputs.update({'start_positions': start_positions, 'end_positions': end_positions} ) return inputs
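The dataset above guards its feature cache with a lock file so that only one process (re)builds features while the others wait and then read the cache. The idiom in isolation (the path is made up for the sketch, and the heavy feature computation is replaced by a stand-in):

import os

import torch
from filelock import FileLock

cache_file = "/tmp/cached_features.pt"  # hypothetical path
with FileLock(cache_file + ".lock"):
    if os.path.exists(cache_file):
        features = torch.load(cache_file)
    else:
        features = {"input_ids": torch.zeros(1, 8, dtype=torch.long)}  # stand-in for real work
        torch.save(features, cache_file)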
332
0
"""simple docstring""" import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __init__( self , __A , __A=7 , __A=3 , __A=30 , __A=400 , __A=True , __A=None , __A=True , __A=[0.5, 0.5, 0.5] , __A=[0.5, 0.5, 0.5] , __A=True , __A=1 / 255 , __A=True , ) -> int: # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p lowerCAmelCase_ :Dict = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333} lowerCAmelCase_ :int = parent lowerCAmelCase_ :Optional[Any] = batch_size lowerCAmelCase_ :str = num_channels lowerCAmelCase_ :int = min_resolution lowerCAmelCase_ :Tuple = max_resolution lowerCAmelCase_ :Any = do_resize lowerCAmelCase_ :int = size lowerCAmelCase_ :Optional[Any] = do_normalize lowerCAmelCase_ :Optional[int] = image_mean lowerCAmelCase_ :Tuple = image_std lowerCAmelCase_ :Any = do_rescale lowerCAmelCase_ :List[Any] = rescale_factor lowerCAmelCase_ :str = do_pad def __lowerCAmelCase ( self ) -> Tuple: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def __lowerCAmelCase ( self , __A , __A=False ) -> Tuple: if not batched: lowerCAmelCase_ :Optional[Any] = image_inputs[0] if isinstance(__A , Image.Image ): lowerCAmelCase_ , lowerCAmelCase_ :int = image.size else: lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = image.shape[1], image.shape[2] if w < h: lowerCAmelCase_ :Tuple = int(self.size["""shortest_edge"""] * h / w ) lowerCAmelCase_ :List[Any] = self.size["""shortest_edge"""] elif w > h: lowerCAmelCase_ :List[str] = self.size["""shortest_edge"""] lowerCAmelCase_ :Dict = int(self.size["""shortest_edge"""] * w / h ) else: lowerCAmelCase_ :Dict = self.size["""shortest_edge"""] lowerCAmelCase_ :Tuple = self.size["""shortest_edge"""] else: lowerCAmelCase_ :Union[str, Any] = [] for image in image_inputs: lowerCAmelCase_ , lowerCAmelCase_ :int = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) lowerCAmelCase_ :List[Any] = max(__A , key=lambda __A : item[0] )[0] lowerCAmelCase_ :Any = max(__A , key=lambda __A : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): UpperCAmelCase_ :int = DetaImageProcessor if is_vision_available() else None def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :Tuple = DetaImageProcessingTester(self ) @property def __lowerCAmelCase ( self ) -> Union[str, Any]: return self.image_processor_tester.prepare_image_processor_dict() def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :List[str] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__A , """image_mean""" ) ) self.assertTrue(hasattr(__A , """image_std""" ) ) self.assertTrue(hasattr(__A , """do_normalize""" ) ) self.assertTrue(hasattr(__A , """do_resize""" ) ) self.assertTrue(hasattr(__A , """do_rescale""" ) ) self.assertTrue(hasattr(__A , """do_pad""" ) ) 
self.assertTrue(hasattr(__A , """size""" ) ) def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :str = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} ) self.assertEqual(image_processor.do_pad , __A ) def __lowerCAmelCase ( self ) -> Optional[Any]: pass def __lowerCAmelCase ( self ) -> Union[str, Any]: # Initialize image_processing lowerCAmelCase_ :List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase_ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A ) for image in image_inputs: self.assertIsInstance(__A , Image.Image ) # Test not batched input lowerCAmelCase_ :List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values lowerCAmelCase_ , lowerCAmelCase_ :List[str] = self.image_processor_tester.get_expected_values(__A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCAmelCase_ , lowerCAmelCase_ :Dict = self.image_processor_tester.get_expected_values(__A , batched=__A ) lowerCAmelCase_ :Dict = image_processing(__A , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __lowerCAmelCase ( self ) -> Any: # Initialize image_processing lowerCAmelCase_ :Tuple = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase_ :str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A ) for image in image_inputs: self.assertIsInstance(__A , np.ndarray ) # Test not batched input lowerCAmelCase_ :Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values lowerCAmelCase_ , lowerCAmelCase_ :Dict = self.image_processor_tester.get_expected_values(__A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCAmelCase_ :List[str] = image_processing(__A , return_tensors="""pt""" ).pixel_values lowerCAmelCase_ , lowerCAmelCase_ :int = self.image_processor_tester.get_expected_values(__A , batched=__A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __lowerCAmelCase ( self ) -> List[Any]: # Initialize image_processing lowerCAmelCase_ :List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase_ :Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A ) for image in image_inputs: self.assertIsInstance(__A , torch.Tensor ) # Test not batched input lowerCAmelCase_ :List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values lowerCAmelCase_ , lowerCAmelCase_ :int = self.image_processor_tester.get_expected_values(__A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCAmelCase_ :List[Any] = image_processing(__A , return_tensors="""pt""" ).pixel_values lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = self.image_processor_tester.get_expected_values(__A , batched=__A ) self.assertEqual( encoded_images.shape , ( 
self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def __lowerCAmelCase ( self ) -> Dict: # prepare image and target lowerCAmelCase_ :int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f: lowerCAmelCase_ :Dict = json.loads(f.read() ) lowerCAmelCase_ :List[Any] = {"""image_id""": 3_9769, """annotations""": target} # encode them lowerCAmelCase_ :Any = DetaImageProcessor() lowerCAmelCase_ :Tuple = image_processing(images=__A , annotations=__A , return_tensors="""pt""" ) # verify pixel values lowerCAmelCase_ :str = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["""pixel_values"""].shape , __A ) lowerCAmelCase_ :int = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __A , atol=1E-4 ) ) # verify area lowerCAmelCase_ :Tuple = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __A ) ) # verify boxes lowerCAmelCase_ :Dict = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __A ) lowerCAmelCase_ :List[str] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __A , atol=1E-3 ) ) # verify image_id lowerCAmelCase_ :Optional[int] = torch.tensor([3_9769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __A ) ) # verify is_crowd lowerCAmelCase_ :Any = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __A ) ) # verify class_labels lowerCAmelCase_ :List[Any] = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __A ) ) # verify orig_size lowerCAmelCase_ :List[Any] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __A ) ) # verify size lowerCAmelCase_ :Any = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __A ) ) @slow def __lowerCAmelCase ( self ) -> int: # prepare image, target and masks_path lowerCAmelCase_ :Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f: lowerCAmelCase_ :int = json.loads(f.read() ) lowerCAmelCase_ :Optional[Any] = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target} lowerCAmelCase_ :Optional[int] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" ) # encode them lowerCAmelCase_ :int = DetaImageProcessor(format="""coco_panoptic""" ) lowerCAmelCase_ :Union[str, Any] = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="""pt""" ) # verify pixel values lowerCAmelCase_ :List[Any] = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["""pixel_values"""].shape , __A ) lowerCAmelCase_ :Tuple = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __A , atol=1E-4 ) ) # verify area lowerCAmelCase_ :List[Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 
5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __A ) ) # verify boxes lowerCAmelCase_ :int = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __A ) lowerCAmelCase_ :Optional[Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __A , atol=1E-3 ) ) # verify image_id lowerCAmelCase_ :Union[str, Any] = torch.tensor([3_9769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __A ) ) # verify is_crowd lowerCAmelCase_ :List[str] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __A ) ) # verify class_labels lowerCAmelCase_ :Optional[Any] = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __A ) ) # verify masks lowerCAmelCase_ :Dict = 82_2873 self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __A ) # verify orig_size lowerCAmelCase_ :Union[str, Any] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __A ) ) # verify size lowerCAmelCase_ :List[Any] = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __A ) )
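The 800x1066 pixel shape asserted in the slow tests follows from shortest-edge resizing: scale the 480x640 COCO image so its shorter side reaches the target, capping the longer side. A sketch of that DETR-style rule, using the 800/1333 defaults of the checkpoint-style config rather than the 18/1333 of the unit-test helper (the library's exact rounding may differ at edge cases):

def shortest_edge_resize(height: int, width: int, shortest: int = 800, longest: int = 1333) -> tuple[int, int]:
    # Shrink the target if the longer side would exceed `longest`.
    if max(height, width) / min(height, width) * shortest > longest:
        shortest = int(round(longest * min(height, width) / max(height, width)))
    if height < width:
        return shortest, int(shortest * width / height)
    return int(shortest * height / width), shortest


assert shortest_edge_resize(480, 640) == (800, 1066)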
84
"""simple docstring""" import argparse import collections import torch from flax import traverse_util from tax import checkpoints from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def _snake_case ( lowercase__ : Dict , lowercase__ : Dict , lowercase__ : str , lowercase__ : Tuple="attention" ) -> str: '''simple docstring''' lowerCAmelCase_ :Tuple = params[f"""{prefix}/layers_{i}/{layer_name}/key/kernel"""] lowerCAmelCase_ :Union[str, Any] = params[f"""{prefix}/layers_{i}/{layer_name}/out/kernel"""] lowerCAmelCase_ :Any = params[f"""{prefix}/layers_{i}/{layer_name}/query/kernel"""] lowerCAmelCase_ :Optional[int] = params[f"""{prefix}/layers_{i}/{layer_name}/value/kernel"""] return k, o, q, v def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Any , lowercase__ : int , lowercase__ : Any=False ) -> int: '''simple docstring''' if split_mlp_wi: lowerCAmelCase_ :Tuple = params[f"""{prefix}/layers_{i}/mlp/wi_0/kernel"""] lowerCAmelCase_ :List[str] = params[f"""{prefix}/layers_{i}/mlp/wi_1/kernel"""] lowerCAmelCase_ :Tuple = (wi_a, wi_a) else: lowerCAmelCase_ :List[Any] = params[f"""{prefix}/layers_{i}/mlp/wi/kernel"""] lowerCAmelCase_ :Dict = params[f"""{prefix}/layers_{i}/mlp/wo/kernel"""] return wi, wo def _snake_case ( lowercase__ : Any , lowercase__ : Dict , lowercase__ : Union[str, Any] , lowercase__ : Optional[int] ) -> Tuple: '''simple docstring''' return params[f"""{prefix}/layers_{i}/{layer_name}/scale"""] def _snake_case ( lowercase__ : dict , *, lowercase__ : int , lowercase__ : bool ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Tuple = traverse_util.flatten_dict(variables["""target"""] ) lowerCAmelCase_ :Tuple = {"""/""".join(lowercase__ ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi lowerCAmelCase_ :Any = """encoder/layers_0/mlp/wi_0/kernel""" in old print("""Split MLP:""" , lowercase__ ) lowerCAmelCase_ :List[Any] = collections.OrderedDict() # Shared embeddings. lowerCAmelCase_ :Optional[int] = old["""token_embedder/embedding"""] # Encoder. for i in range(lowercase__ ): # Block i, layer 0 (Self Attention). lowerCAmelCase_ :int = tax_layer_norm_lookup(lowercase__ , lowercase__ , """encoder""" , """pre_attention_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :str = tax_attention_lookup(lowercase__ , lowercase__ , """encoder""" , """attention""" ) lowerCAmelCase_ :Optional[Any] = layer_norm lowerCAmelCase_ :Any = k.T lowerCAmelCase_ :Tuple = o.T lowerCAmelCase_ :Tuple = q.T lowerCAmelCase_ :str = v.T # Block i, layer 1 (MLP). lowerCAmelCase_ :Dict = tax_layer_norm_lookup(lowercase__ , lowercase__ , """encoder""" , """pre_mlp_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ :Any = tax_mlp_lookup(lowercase__ , lowercase__ , """encoder""" , lowercase__ ) lowerCAmelCase_ :Union[str, Any] = layer_norm if split_mlp_wi: lowerCAmelCase_ :List[Any] = wi[0].T lowerCAmelCase_ :Dict = wi[1].T else: lowerCAmelCase_ :int = wi.T lowerCAmelCase_ :List[str] = wo.T lowerCAmelCase_ :Tuple = old[ """encoder/relpos_bias/rel_embedding""" ].T lowerCAmelCase_ :List[str] = old["""encoder/encoder_norm/scale"""] if not is_encoder_only: # Decoder. for i in range(lowercase__ ): # Block i, layer 0 (Self Attention). 
lowerCAmelCase_ :Optional[Any] = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_self_attention_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = tax_attention_lookup(lowercase__ , lowercase__ , """decoder""" , """self_attention""" ) lowerCAmelCase_ :List[Any] = layer_norm lowerCAmelCase_ :List[str] = k.T lowerCAmelCase_ :Any = o.T lowerCAmelCase_ :Any = q.T lowerCAmelCase_ :Dict = v.T # Block i, layer 1 (Cross Attention). lowerCAmelCase_ :int = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_cross_attention_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Tuple = tax_attention_lookup(lowercase__ , lowercase__ , """decoder""" , """encoder_decoder_attention""" ) lowerCAmelCase_ :Optional[int] = layer_norm lowerCAmelCase_ :str = k.T lowerCAmelCase_ :Tuple = o.T lowerCAmelCase_ :Any = q.T lowerCAmelCase_ :int = v.T # Block i, layer 2 (MLP). lowerCAmelCase_ :Any = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_mlp_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ :Dict = tax_mlp_lookup(lowercase__ , lowercase__ , """decoder""" , lowercase__ ) lowerCAmelCase_ :List[Any] = layer_norm if split_mlp_wi: lowerCAmelCase_ :Any = wi[0].T lowerCAmelCase_ :Any = wi[1].T else: lowerCAmelCase_ :Tuple = wi.T lowerCAmelCase_ :List[str] = wo.T lowerCAmelCase_ :Optional[Any] = old["""decoder/decoder_norm/scale"""] lowerCAmelCase_ :Optional[Any] = old[ """decoder/relpos_bias/rel_embedding""" ].T # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: lowerCAmelCase_ :Tuple = old["""decoder/logits_dense/kernel"""].T return new def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : bool ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :Optional[int] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: lowerCAmelCase_ :Optional[int] = state_dict["""shared.weight"""] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: lowerCAmelCase_ :Tuple = state_dict["""shared.weight"""] if "lm_head.weight" not in state_dict: # For old 1.0 models. print("""Using shared word embeddings as lm_head.""" ) lowerCAmelCase_ :Any = state_dict["""shared.weight"""] return state_dict def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : str , lowercase__ : List[Any] , lowercase__ : Optional[Any] ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ :List[Any] = checkpoints.load_tax_checkpoint(lowercase__ ) lowerCAmelCase_ :Optional[int] = convert_tax_to_pytorch(lowercase__ , num_layers=config.num_layers , is_encoder_only=lowercase__ ) lowerCAmelCase_ :Union[str, Any] = make_state_dict(lowercase__ , lowercase__ ) model.load_state_dict(lowercase__ , strict=lowercase__ ) def _snake_case ( lowercase__ : List[Any] , lowercase__ : Optional[Any] , lowercase__ : str , lowercase__ : bool = False ) -> Any: '''simple docstring''' lowerCAmelCase_ :Any = TaConfig.from_json_file(lowercase__ ) print(f"""Building PyTorch model from configuration: {config}""" ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. 
if is_encoder_only: lowerCAmelCase_ :List[Any] = TaEncoderModel(lowercase__ ) else: lowerCAmelCase_ :List[str] = TaForConditionalGeneration(lowercase__ ) # Load weights from tf checkpoint load_tax_weights_in_ta(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(lowercase__ ) # Verify that we can load the checkpoint. model.from_pretrained(lowercase__ ) print("""Done""" ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.') # Required parameters parser.add_argument( '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False ) __UpperCAmelCase = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only )
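Every attention and MLP kernel above is transposed because Flax Dense layers store kernels as (in_features, out_features) while torch.nn.Linear.weight is (out_features, in_features). A toy check of that convention:

import numpy as np
import torch

flax_kernel = np.random.randn(4, 3).astype(np.float32)  # (in, out), as in a T5X checkpoint
x = np.random.randn(2, 4).astype(np.float32)
flax_out = x @ flax_kernel

linear = torch.nn.Linear(4, 3, bias=False)
with torch.no_grad():
    linear.weight.copy_(torch.from_numpy(flax_kernel.T))  # the .T from the script above
torch_out = linear(torch.from_numpy(x)).numpy()
assert np.allclose(flax_out, torch_out, atol=1e-5)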
84
1
# fmt: off
MORSE_CODE_DICT = {
    "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.",
    "G": "--.", "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..",
    "M": "--", "N": "-.", "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.",
    "S": "...", "T": "-", "U": "..-", "V": "...-", "W": ".--", "X": "-..-",
    "Y": "-.--", "Z": "--..", "1": ".----", "2": "..---", "3": "...--",
    "4": "....-", "5": ".....", "6": "-....", "7": "--...", "8": "---..",
    "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.", ":": "---...",
    ",": "--..--", ".": ".-.-.-", "'": ".----.", '"': ".-..-.", "?": "..--..",
    "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-", "(": "-.--.",
    ")": "-.--.-", "!": "-.-.--", " ": "/",
}  # Exclamation mark is not in ITU-R recommendation
# fmt: on

REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
369
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase__ : List[str] ={ '''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : List[str] =[ '''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TimesformerModel''', '''TimesformerForVideoClassification''', '''TimesformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timesformer import ( TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimesformerForVideoClassification, TimesformerModel, TimesformerPreTrainedModel, ) else: import sys lowerCAmelCase__ : Union[str, Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
118
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) A__: List[str] = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__: List[Any] = ['''PLBartTokenizer'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__: Union[str, Any] = [ '''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''', '''PLBartForCausalLM''', '''PLBartForConditionalGeneration''', '''PLBartForSequenceClassification''', '''PLBartModel''', '''PLBartPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_plbart import PLBartTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_plbart import ( PLBART_PRETRAINED_MODEL_ARCHIVE_LIST, PLBartForCausalLM, PLBartForConditionalGeneration, PLBartForSequenceClassification, PLBartModel, PLBartPreTrainedModel, ) else: import sys A__: List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
276
'''simple docstring''' class A__ : def __init__( self :List[Any] ) -> None: '''simple docstring''' _a : dict[str, TrieNode] ={} # Mapping from char to TrieNode _a : List[str] =False def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :list[str] ) -> None: '''simple docstring''' for word in words: self.insert(SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :str ) -> None: '''simple docstring''' _a : str =self for char in word: if char not in curr.nodes: _a : Dict =TrieNode() _a : List[Any] =curr.nodes[char] _a : int =True def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :str ) -> bool: '''simple docstring''' _a : int =self for char in word: if char not in curr.nodes: return False _a : List[Any] =curr.nodes[char] return curr.is_leaf def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :str ) -> None: '''simple docstring''' def _delete(SCREAMING_SNAKE_CASE :TrieNode , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :int ) -> bool: if index == len(SCREAMING_SNAKE_CASE ): # If word does not exist if not curr.is_leaf: return False _a : Any =False return len(curr.nodes ) == 0 _a : int =word[index] _a : int =curr.nodes.get(SCREAMING_SNAKE_CASE ) # If char not in current trie node if not char_node: return False # Flag to check if node can be deleted _a : List[Any] =_delete(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , index + 1 ) if delete_curr: del curr.nodes[char] return len(curr.nodes ) == 0 return delete_curr _delete(self , SCREAMING_SNAKE_CASE , 0 ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : TrieNode ,_UpperCAmelCase : str ) -> None: if node.is_leaf: print(_UpperCAmelCase ,end=""" """ ) for key, value in node.nodes.items(): print_words(_UpperCAmelCase ,word + key ) def SCREAMING_SNAKE_CASE_ ( ) -> bool: _a : List[str] ="""banana bananas bandana band apple all beast""".split() _a : List[Any] =TrieNode() root.insert_many(_UpperCAmelCase ) # print_words(root, "") assert all(root.find(_UpperCAmelCase ) for word in words ) assert root.find("""banana""" ) assert not root.find("""bandanas""" ) assert not root.find("""apps""" ) assert root.find("""apple""" ) assert root.find("""all""" ) root.delete("""all""" ) assert not root.find("""all""" ) root.delete("""banana""" ) assert not root.find("""banana""" ) assert root.find("""bananas""" ) return True def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : bool ) -> None: print(str(_UpperCAmelCase ) ,"""works!""" if passes else """doesn't work :(""" ) def SCREAMING_SNAKE_CASE_ ( ) -> None: assert test_trie() def SCREAMING_SNAKE_CASE_ ( ) -> None: print_results("""Testing trie functionality""" ,test_trie() ) if __name__ == "__main__": main()
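The same node layout supports prefix queries: walk down exactly as in the find method but skip the is_leaf check at the end. A hypothetical helper, not part of the class above:

def starts_with(root: TrieNode, prefix: str) -> bool:
    curr = root
    for char in prefix:
        if char not in curr.nodes:
            return False
        curr = curr.nodes[char]
    return True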
276
1
import json import os from typing import Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase__ : Optional[Any] =logging.get_logger(__name__) lowerCAmelCase__ : Dict ={ "vocab_file": "vocab.json", "merges_file": "merges.txt", } lowerCAmelCase__ : List[Any] ={ "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"}, "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"}, } lowerCAmelCase__ : List[Any] ={ "ctrl": 256, } lowerCAmelCase__ : str ={ "Pregnancy": 168629, "Christianity": 7675, "Explain": 106423, "Fitness": 63440, "Saving": 63163, "Ask": 27171, "Ass": 95985, "Joke": 163509, "Questions": 45622, "Thoughts": 49605, "Retail": 52342, "Feminism": 164338, "Writing": 11992, "Atheism": 192263, "Netflix": 48616, "Computing": 39639, "Opinion": 43213, "Alone": 44967, "Funny": 58917, "Gaming": 40358, "Human": 4088, "India": 1331, "Joker": 77138, "Diet": 36206, "Legal": 11859, "Norman": 4939, "Tip": 72689, "Weight": 52343, "Movies": 46273, "Running": 23425, "Science": 2090, "Horror": 37793, "Confession": 60572, "Finance": 12250, "Politics": 16360, "Scary": 191985, "Support": 12654, "Technologies": 32516, "Teenage": 66160, "Event": 32769, "Learned": 67460, "Notion": 182770, "Wikipedia": 37583, "Books": 6665, "Extract": 76050, "Confessions": 102701, "Conspiracy": 75932, "Links": 63674, "Narcissus": 150425, "Relationship": 54766, "Relationships": 134796, "Reviews": 41671, "News": 4256, "Translation": 26820, "multilingual": 128406, } def __lowercase ( a__ ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = set() __SCREAMING_SNAKE_CASE = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __SCREAMING_SNAKE_CASE = char __SCREAMING_SNAKE_CASE = set(_UpperCAmelCase ) return pairs class UpperCAmelCase_ ( snake_case__ ): '''simple docstring''' UpperCamelCase__ : List[Any] = VOCAB_FILES_NAMES UpperCamelCase__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase__ : Optional[Any] = CONTROL_CODES def __init__( self , _A , _A , _A="<unk>" , **_A ): '''simple docstring''' super().__init__(unk_token=UpperCAmelCase_ , **UpperCAmelCase_ ) with open(UpperCAmelCase_ , encoding='utf-8' ) as vocab_handle: __SCREAMING_SNAKE_CASE = json.load(UpperCAmelCase_ ) __SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()} with open(UpperCAmelCase_ , encoding='utf-8' ) as merges_handle: __SCREAMING_SNAKE_CASE = merges_handle.read().split('\n' )[1:-1] __SCREAMING_SNAKE_CASE = [tuple(merge.split() ) for merge in merges] __SCREAMING_SNAKE_CASE = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) ) __SCREAMING_SNAKE_CASE = {} @property def _A ( self ): '''simple docstring''' return len(self.encoder ) def _A ( self ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def _A ( self , _A ): '''simple docstring''' if token in self.cache: return self.cache[token] __SCREAMING_SNAKE_CASE = tuple(UpperCAmelCase_ ) __SCREAMING_SNAKE_CASE = tuple(list(word[:-1] ) + [word[-1] + '</w>'] ) __SCREAMING_SNAKE_CASE = get_pairs(UpperCAmelCase_ ) if not pairs: return token while True: __SCREAMING_SNAKE_CASE = min(UpperCAmelCase_ , key=lambda _A : self.bpe_ranks.get(UpperCAmelCase_ , float('inf' ) ) ) if bigram not in self.bpe_ranks: break __SCREAMING_SNAKE_CASE = bigram __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = 0 while i < len(UpperCAmelCase_ ): try: 
__SCREAMING_SNAKE_CASE = word.index(UpperCAmelCase_ , UpperCAmelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __SCREAMING_SNAKE_CASE = j if word[i] == first and i < len(UpperCAmelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __SCREAMING_SNAKE_CASE = tuple(UpperCAmelCase_ ) __SCREAMING_SNAKE_CASE = new_word if len(UpperCAmelCase_ ) == 1: break else: __SCREAMING_SNAKE_CASE = get_pairs(UpperCAmelCase_ ) __SCREAMING_SNAKE_CASE = "@@ ".join(UpperCAmelCase_ ) __SCREAMING_SNAKE_CASE = word[:-4] __SCREAMING_SNAKE_CASE = word return word def _A ( self , _A ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = re.findall(r'\S+\n?' , UpperCAmelCase_ ) for token in words: split_tokens.extend(list(self.bpe(UpperCAmelCase_ ).split(' ' ) ) ) return split_tokens def _A ( self , _A ): '''simple docstring''' return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token ) ) def _A ( self , _A ): '''simple docstring''' return self.decoder.get(UpperCAmelCase_ , self.unk_token ) def _A ( self , _A ): '''simple docstring''' __SCREAMING_SNAKE_CASE = " ".join(UpperCAmelCase_ ).replace('@@ ' , '' ).strip() return out_string def _A ( self , _A , _A = None ): '''simple docstring''' if not os.path.isdir(UpperCAmelCase_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __SCREAMING_SNAKE_CASE = os.path.join( UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) __SCREAMING_SNAKE_CASE = os.path.join( UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(UpperCAmelCase_ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_ ) + '\n' ) __SCREAMING_SNAKE_CASE = 0 with open(UpperCAmelCase_ , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _A : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ' Please check that the tokenizer is not corrupted!' ) __SCREAMING_SNAKE_CASE = token_index writer.write(' '.join(UpperCAmelCase_ ) + '\n' ) index += 1 return vocab_file, merge_file # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True): # filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)) # tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens) # tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far) # return ''.join(tokens_generated_so_far)
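The bpe() method above repeatedly merges the lowest-ranked adjacent symbol pair until none of the remaining pairs has a rank. The same loop in miniature, with a two-rule merge table invented for the sketch:

def get_pairs_toy(word: tuple) -> set:
    return {(a, b) for a, b in zip(word, word[1:])}


ranks = {("l", "o"): 0, ("lo", "w</w>"): 1}  # toy merge ranks


def bpe_toy(token: str) -> tuple:
    word = tuple(token[:-1]) + (token[-1] + "</w>",)  # mirror the '</w>' suffix above
    while True:
        candidates = [p for p in get_pairs_toy(word) if p in ranks]
        if not candidates:
            return word
        first, second = min(candidates, key=ranks.get)
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)


assert bpe_toy("low") == ("low</w>",)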
365
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase__ : str ={'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : Optional[int] =[ '''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ViTMAEForPreTraining''', '''ViTMAELayer''', '''ViTMAEModel''', '''ViTMAEPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : List[str] =[ '''TFViTMAEForPreTraining''', '''TFViTMAEModel''', '''TFViTMAEPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_mae import ( VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMAEForPreTraining, ViTMAELayer, ViTMAEModel, ViTMAEPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel else: import sys lowerCAmelCase__ : Optional[int] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
118
0
import gc
import unittest

from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
        CTRLForSequenceClassification,
        CTRLLMHeadModel,
        CTRLModel,
    )


class CTRLModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )

    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}

        return config, inputs_dict

    def create_and_check_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))


@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False

    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass


@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
187
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.test_utils import execute_subprocess_async def __SCREAMING_SNAKE_CASE ( A_=None ): if subparsers is not None: lowerCAmelCase__ : Optional[Any] = subparsers.add_parser('''test''' ) else: lowerCAmelCase__ : List[str] = argparse.ArgumentParser('''Accelerate test command''' ) parser.add_argument( '''--config_file''' , default=A_ , help=( '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache ''' '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ''' '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ''' '''with \'huggingface\'.''' ) , ) if subparsers is not None: parser.set_defaults(func=A_ ) return parser def __SCREAMING_SNAKE_CASE ( A_ ): lowerCAmelCase__ : Optional[int] = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''] ) if args.config_file is None: lowerCAmelCase__ : Optional[Any] = script_name else: lowerCAmelCase__ : Any = f'--config_file={args.config_file} {script_name}' lowerCAmelCase__ : List[Any] = ['''accelerate-launch'''] + test_args.split() lowerCAmelCase__ : int = execute_subprocess_async(A_ , env=os.environ.copy() ) if result.returncode == 0: print('''Test is a success! You are ready for your distributed training!''' ) def __SCREAMING_SNAKE_CASE ( ): lowerCAmelCase__ : Any = test_command_parser() lowerCAmelCase__ : List[Any] = parser.parse_args() test_command(A_ ) if __name__ == "__main__": main()
106
0
'''simple docstring''' import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def _UpperCAmelCase ( _UpperCamelCase : Union[str, Any], _UpperCamelCase : int, _UpperCamelCase : List[str], _UpperCamelCase : Any ) -> Tuple: if isinstance(lowerCAmelCase__, lowerCAmelCase__ ): A_ = np.full((len(lowerCAmelCase__ ), sequence_length, 2), lowerCAmelCase__ ) else: A_ = np.full((len(lowerCAmelCase__ ), sequence_length), lowerCAmelCase__ ) for i, tensor in enumerate(lowerCAmelCase__ ): if padding_side == "right": if isinstance(lowerCAmelCase__, lowerCAmelCase__ ): A_ = tensor[:sequence_length] else: A_ = tensor[:sequence_length] else: if isinstance(lowerCAmelCase__, lowerCAmelCase__ ): A_ = tensor[:sequence_length] else: A_ = tensor[:sequence_length] return out_tensor.tolist() def _UpperCAmelCase ( _UpperCamelCase : Optional[Any] ) -> List[Any]: A_ = ord(lowerCAmelCase__ ) if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 1_23 and cp <= 1_26): return True A_ = unicodedata.category(lowerCAmelCase__ ) if cat.startswith('''P''' ): return True return False @dataclass class __UpperCAmelCase ( __lowercase ): '''simple docstring''' __lowercase : PreTrainedTokenizerBase __lowercase : Union[bool, str, PaddingStrategy] = True __lowercase : Optional[int] = None __lowercase : Optional[int] = None __lowercase : int = -100 __lowercase : str = "pt" def __A ( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: import torch A_ = '''label''' if '''label''' in features[0].keys() else '''labels''' A_ = [feature[label_name] for feature in features] if label_name in features[0].keys() else None A_ = self.tokenizer.pad( snake_case_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , ) if labels is None: return batch A_ = torch.tensor(batch['''entity_ids'''] ).shape[1] A_ = self.tokenizer.padding_side if padding_side == "right": A_ = [ list(snake_case_ ) + [self.label_pad_token_id] * (sequence_length - len(snake_case_ )) for label in labels ] else: A_ = [ [self.label_pad_token_id] * (sequence_length - len(snake_case_ )) + list(snake_case_ ) for label in labels ] A_ = [feature['''ner_tags'''] for feature in features] A_ = padding_tensor(snake_case_ , -1 , snake_case_ , snake_case_ ) A_ = [feature['''original_entity_spans'''] for feature in features] A_ = padding_tensor(snake_case_ , (-1, -1) , snake_case_ , snake_case_ ) A_ = {k: torch.tensor(snake_case_ , dtype=torch.intaa ) for k, v in batch.items()} return batch
351
'''simple docstring''' import tempfile import unittest import numpy as np from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import BertConfig, is_flax_available from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax if is_flax_available(): import os from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FlaxBertModel __snake_case : str = '0.12' # assumed parallelism: 8 @require_flax @is_staging_test class __UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @classmethod def __A ( cls ) -> Dict: A_ = TOKEN HfFolder.save_token(_SCREAMING_SNAKE_CASE ) @classmethod def __A ( cls ) -> Optional[int]: try: delete_repo(token=cls._token , repo_id='''test-model-flax''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' ) except HTTPError: pass def __A ( self ) -> str: A_ = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE ) model.push_to_hub('''test-model-flax''' , use_auth_token=self._token ) A_ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' ) A_ = flatten_dict(unfreeze(model.params ) ) A_ = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): A_ = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' ) # Reset repo delete_repo(token=self._token , repo_id='''test-model-flax''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(_SCREAMING_SNAKE_CASE , repo_id='''test-model-flax''' , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token ) A_ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' ) A_ = flatten_dict(unfreeze(model.params ) ) A_ = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): A_ = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' ) def __A ( self ) -> List[str]: A_ = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE ) model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token ) A_ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' ) A_ = flatten_dict(unfreeze(model.params ) ) A_ = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): A_ = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( _SCREAMING_SNAKE_CASE , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token ) A_ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' ) A_ = flatten_dict(unfreeze(model.params ) ) A_ = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): A_ = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' ) def _UpperCAmelCase ( _UpperCamelCase : Union[str, Any], _UpperCamelCase : Tuple ) -> Dict: A_ = True 
A_ = flatten_dict(modela.params ) A_ = flatten_dict(modela.params ) for key in flat_params_a.keys(): if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1E-4: A_ = False return models_are_equal @require_flax class __UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __A ( self ) -> List[str]: A_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE ) A_ = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) with self.assertRaises(_SCREAMING_SNAKE_CASE ): A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE ) A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE ) self.assertTrue(check_models_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) def __A ( self ) -> List[Any]: A_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE ) A_ = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , max_shard_size='''10KB''' ) with self.assertRaises(_SCREAMING_SNAKE_CASE ): A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE ) A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE ) self.assertTrue(check_models_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) def __A ( self ) -> Dict: A_ = '''bert''' A_ = '''hf-internal-testing/tiny-random-bert-subfolder''' with self.assertRaises(_SCREAMING_SNAKE_CASE ): A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE ) A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) def __A ( self ) -> Optional[Any]: A_ = '''bert''' A_ = '''hf-internal-testing/tiny-random-bert-sharded-subfolder''' with self.assertRaises(_SCREAMING_SNAKE_CASE ): A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE ) A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
18
0
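For reference, the collator in the row above pads labels, NER tags, and entity spans out to one fixed sequence length; below is a minimal sketch of that fixed-length padding pattern, assuming NumPy. The helper name `pad_to_length` and its defaults are illustrative, not taken from the source.

import numpy as np


def pad_to_length(sequences, sequence_length, padding_value, padding_side="right"):
    # Pre-fill the output with the padding value, then copy each (truncated)
    # sequence into the left or right end of its row.
    out = np.full((len(sequences), sequence_length), padding_value)
    for i, seq in enumerate(sequences):
        seq = seq[:sequence_length]
        if padding_side == "right":
            out[i, : len(seq)] = seq
        else:
            out[i, sequence_length - len(seq) :] = seq
    return out.tolist()


# Example: pad_to_length([[1, 2], [3, 4, 5, 6]], 4, -100)
# -> [[1, 2, -100, -100], [3, 4, 5, 6]]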
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    # A matrix is Hermitian iff it equals its own conjugate transpose.
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    # Rayleigh quotient R(a, v) = (v* a v) / (v* v) for Hermitian a.
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
216
from pathlib import Path from typing import List from transformers import is_torch_available, is_vision_available from transformers.testing_utils import get_tests_dir, is_tool_test from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText if is_torch_available(): import torch if is_vision_available(): from PIL import Image lowercase__ =['text', 'image', 'audio'] def __UpperCamelCase ( lowerCAmelCase__ : List[str] ): __a : Optional[int] = [] for input_type in input_types: if input_type == "text": inputs.append('''Text input''' ) elif input_type == "image": inputs.append( Image.open(Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' ).resize((5_1_2, 5_1_2) ) ) elif input_type == "audio": inputs.append(torch.ones(3_0_0_0 ) ) elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): inputs.append(create_inputs(lowerCAmelCase__ ) ) else: raise ValueError(f"Invalid type requested: {input_type}" ) return inputs def __UpperCamelCase ( lowerCAmelCase__ : List ): __a : List[str] = [] for output in outputs: if isinstance(lowerCAmelCase__ , (str, AgentText) ): output_types.append('''text''' ) elif isinstance(lowerCAmelCase__ , (Image.Image, AgentImage) ): output_types.append('''image''' ) elif isinstance(lowerCAmelCase__ , (torch.Tensor, AgentAudio) ): output_types.append('''audio''' ) else: raise ValueError(f"Invalid output: {output}" ) return output_types @is_tool_test class UpperCamelCase__ : def lowerCAmelCase (self : Any ): self.assertTrue(hasattr(self.tool , '''inputs''' ) ) self.assertTrue(hasattr(self.tool , '''outputs''' ) ) __a : Any = self.tool.inputs for _input in inputs: if isinstance(_input , snake_case_ ): for __input in _input: self.assertTrue(__input in authorized_types ) else: self.assertTrue(_input in authorized_types ) __a : Optional[int] = self.tool.outputs for _output in outputs: self.assertTrue(_output in authorized_types ) def lowerCAmelCase (self : List[Any] ): __a : Union[str, Any] = create_inputs(self.tool.inputs ) __a : List[Any] = self.tool(*snake_case_ ) # There is a single output if len(self.tool.outputs ) == 1: __a : Tuple = [outputs] self.assertListEqual(output_types(snake_case_ ) , self.tool.outputs ) def lowerCAmelCase (self : List[Any] ): self.assertTrue(hasattr(self.tool , '''description''' ) ) self.assertTrue(hasattr(self.tool , '''default_checkpoint''' ) ) self.assertTrue(self.tool.description.startswith('''This is a tool that''' ) ) def lowerCAmelCase (self : Any ): __a : Any = create_inputs(self.tool.inputs ) __a : Union[str, Any] = self.tool(*snake_case_ ) if not isinstance(snake_case_ , snake_case_ ): __a : Tuple = [outputs] self.assertEqual(len(snake_case_ ) , len(self.tool.outputs ) ) for output, output_type in zip(snake_case_ , self.tool.outputs ): __a : List[Any] = AGENT_TYPE_MAPPING[output_type] self.assertTrue(isinstance(snake_case_ , snake_case_ ) ) def lowerCAmelCase (self : Optional[int] ): __a : Any = create_inputs(self.tool.inputs ) __a : Dict = [] for _input, input_type in zip(snake_case_ , self.tool.inputs ): if isinstance(snake_case_ , snake_case_ ): _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] ) else: _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) ) # Should not raise an error __a : Optional[Any] = self.tool(*snake_case_ ) if not isinstance(snake_case_ , snake_case_ ): __a : Dict = [outputs] self.assertEqual(len(snake_case_ ) , len(self.tool.outputs ) )
216
1
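Worked check of the final assertion in the Rayleigh-quotient row above, with A = [[1, 2, 4], [2, 3, -1], [4, -1, 1]] and v = [1, 2, 3]^T: A v = [17, 5, 5]^T, so v^T A v = 17 + 10 + 15 = 42 and v^T v = 1 + 4 + 9 = 14, giving R(A, v) = 42 / 14 = 3, which is exactly the asserted float(3).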
'''simple docstring''' import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class UpperCAmelCase ( nn.Module ): def __init__( self : Any , __snake_case : nn.Module , __snake_case : int ) -> str: super().__init__() _lowerCAmelCase = module _lowerCAmelCase = nn.Sequential( nn.Linear(module.in_features , __snake_case , bias=__snake_case ) , nn.Linear(__snake_case , module.out_features , bias=__snake_case ) , ) _lowerCAmelCase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=__snake_case ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def lowercase__ ( self : Optional[Any] , __snake_case : Union[str, Any] , *__snake_case : Optional[int] , **__snake_case : Optional[Any] ) -> Dict: return self.module(__snake_case , *__snake_case , **__snake_case ) + self.adapter(__snake_case ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class UpperCAmelCase ( unittest.TestCase ): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module _lowercase: Optional[int] = '''bigscience/bloom-1b7''' # Constant values _lowercase: Union[str, Any] = 2.109659552692574 _lowercase: Union[str, Any] = '''Hello my name is''' _lowercase: List[Any] = set() EXPECTED_OUTPUTS.add('''Hello my name is John and I am a professional photographer. 
I''' ) EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''' ) EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''' ) _lowercase: int = 10 def lowercase__ ( self : str ) -> Optional[Any]: # Models and tokenizer _lowerCAmelCase = AutoTokenizer.from_pretrained(self.model_name ) class UpperCAmelCase ( snake_case_ ): def lowercase__ ( self : Optional[int] ) -> List[str]: super().setUp() # Models and tokenizer _lowerCAmelCase = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map="""auto""" ) _lowerCAmelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__snake_case , device_map="""auto""" ) def lowercase__ ( self : List[str] ) -> Tuple: del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : Dict ) -> int: _lowerCAmelCase = self.model_abit.config self.assertTrue(hasattr(__snake_case , """quantization_config""" ) ) _lowerCAmelCase = config.to_dict() _lowerCAmelCase = config.to_diff_dict() _lowerCAmelCase = config.to_json_string() def lowercase__ ( self : Optional[Any] ) -> List[Any]: from bitsandbytes.nn import Paramsabit _lowerCAmelCase = self.model_fpaa.get_memory_footprint() _lowerCAmelCase = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) _lowerCAmelCase = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def lowercase__ ( self : Dict ) -> Union[str, Any]: from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(__snake_case , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def lowercase__ ( self : Union[str, Any] ) -> str: _lowerCAmelCase = self.tokenizer(self.input_text , return_tensors="""pt""" ) _lowerCAmelCase = self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__snake_case ) , self.EXPECTED_OUTPUTS ) def lowercase__ ( self : Union[str, Any] ) -> Optional[int]: _lowerCAmelCase = BitsAndBytesConfig() _lowerCAmelCase = True _lowerCAmelCase = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=__snake_case , device_map="""auto""" ) _lowerCAmelCase = self.tokenizer(self.input_text , return_tensors="""pt""" ) _lowerCAmelCase = model_abit_from_config.generate( input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__snake_case ) , self.EXPECTED_OUTPUTS ) def lowercase__ ( self : Tuple ) -> List[str]: with self.assertRaises(__snake_case ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(__snake_case ) def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]: _lowerCAmelCase = BitsAndBytesConfig() with self.assertRaises(__snake_case ): _lowerCAmelCase = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=__snake_case , load_in_abit=__snake_case , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , ) def lowercase__ ( self : Any ) -> Dict: with self.assertRaises(__snake_case ): # Tries with `str` self.model_abit.to("""cpu""" ) with 
self.assertRaises(__snake_case ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(__snake_case ): # Tries with a `device` self.model_abit.to(torch.device("""cuda:0""" ) ) with self.assertRaises(__snake_case ): # Tries with a `device` self.model_abit.float() with self.assertRaises(__snake_case ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything _lowerCAmelCase = self.tokenizer(self.input_text , return_tensors="""pt""" ) _lowerCAmelCase = self.model_fpaa.to(torch.floataa ) _lowerCAmelCase = self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error _lowerCAmelCase = self.model_fpaa.to("""cpu""" ) # Check this does not throw an error _lowerCAmelCase = self.model_fpaa.half() # Check this does not throw an error _lowerCAmelCase = self.model_fpaa.float() def lowercase__ ( self : List[str] ) -> Dict: _lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=__snake_case , device_map="""auto""" ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class UpperCAmelCase ( unittest.TestCase ): @classmethod def lowercase__ ( cls : Optional[int] ) -> Optional[Any]: _lowerCAmelCase = """t5-small""" _lowerCAmelCase = """google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense _lowerCAmelCase = AutoTokenizer.from_pretrained(cls.model_name ) _lowerCAmelCase = """Translate in German: Hello, my dog is cute""" def lowercase__ ( self : Any ) -> List[str]: gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : str ) -> Dict: from transformers import TaForConditionalGeneration _lowerCAmelCase = TaForConditionalGeneration._keep_in_fpaa_modules _lowerCAmelCase = None # test with `t5-small` _lowerCAmelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__snake_case , device_map="""auto""" ) _lowerCAmelCase = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) _lowerCAmelCase = model.generate(**__snake_case ) # test with `flan-t5-small` _lowerCAmelCase = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=__snake_case , device_map="""auto""" ) _lowerCAmelCase = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) _lowerCAmelCase = model.generate(**__snake_case ) _lowerCAmelCase = modules def lowercase__ ( self : Optional[int] ) -> List[Any]: import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` _lowerCAmelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__snake_case , device_map="""auto""" ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) _lowerCAmelCase = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) _lowerCAmelCase = model.generate(**__snake_case ) # test with `flan-t5-small` _lowerCAmelCase = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=__snake_case , device_map="""auto""" ) _lowerCAmelCase = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) _lowerCAmelCase = model.generate(**__snake_case ) class UpperCAmelCase ( snake_case_ ): def lowercase__ ( self : Optional[Any] ) -> List[Any]: super().setUp() # model_name _lowerCAmelCase = 
"""bigscience/bloom-560m""" _lowerCAmelCase = """t5-small""" # Different types of model _lowerCAmelCase = AutoModel.from_pretrained(self.model_name , load_in_abit=__snake_case , device_map="""auto""" ) # Sequence classification model _lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=__snake_case , device_map="""auto""" ) # CausalLM model _lowerCAmelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__snake_case , device_map="""auto""" ) # Seq2seq model _lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=__snake_case , device_map="""auto""" ) def lowercase__ ( self : Optional[Any] ) -> Optional[Any]: del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : str ) -> Optional[Any]: from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class UpperCAmelCase ( snake_case_ ): def lowercase__ ( self : List[Any] ) -> Dict: super().setUp() def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]: del self.pipe gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : Union[str, Any] ) -> int: _lowerCAmelCase = pipeline( """text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass _lowerCAmelCase = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class UpperCAmelCase ( snake_case_ ): def lowercase__ ( self : int ) -> Union[str, Any]: super().setUp() def lowercase__ ( self : Union[str, Any] ) -> List[str]: _lowerCAmelCase = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=__snake_case , device_map="""balanced""" ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model _lowerCAmelCase = self.tokenizer(self.input_text , return_tensors="""pt""" ) # Second real batch _lowerCAmelCase = model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__snake_case ) , self.EXPECTED_OUTPUTS ) class UpperCAmelCase ( snake_case_ ): def lowercase__ ( self : Tuple ) -> Union[str, Any]: _lowerCAmelCase = """facebook/opt-350m""" super().setUp() def lowercase__ ( self : Optional[Any] ) -> Dict: if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ): return # Step 1: freeze all parameters _lowerCAmelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__snake_case ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): _lowerCAmelCase = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. 
layernorm) to fp32 for stability _lowerCAmelCase = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(__snake_case ) ): _lowerCAmelCase = LoRALayer(module.q_proj , rank=16 ) _lowerCAmelCase = LoRALayer(module.k_proj , rank=16 ) _lowerCAmelCase = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch _lowerCAmelCase = self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): _lowerCAmelCase = model.forward(**__snake_case ) out.logits.norm().backward() for module in model.modules(): if isinstance(__snake_case , __snake_case ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(__snake_case , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class UpperCAmelCase ( snake_case_ ): _lowercase: Optional[Any] = '''gpt2-xl''' _lowercase: Tuple = 3.3191854854152187
353
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import XLMRobertaTokenizer from diffusers import ( AltDiffusionImgaImgPipeline, AutoencoderKL, PNDMScheduler, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class UpperCAmelCase ( unittest.TestCase ): def lowercase__ ( self : Optional[int] ) -> Dict: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def lowercase__ ( self : List[Any] ) -> List[str]: _lowerCAmelCase = 1 _lowerCAmelCase = 3 _lowerCAmelCase = (32, 32) _lowerCAmelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__snake_case ) return image @property def lowercase__ ( self : int ) -> Union[str, Any]: torch.manual_seed(0 ) _lowerCAmelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) return model @property def lowercase__ ( self : Optional[int] ) -> List[str]: torch.manual_seed(0 ) _lowerCAmelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) return model @property def lowercase__ ( self : Dict ) -> Optional[Any]: torch.manual_seed(0 ) _lowerCAmelCase = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_06 , ) return RobertaSeriesModelWithTransformation(__snake_case ) @property def lowercase__ ( self : Union[str, Any] ) -> str: def extract(*__snake_case : List[Any] , **__snake_case : Any ): class UpperCAmelCase : def __init__( self : Any ) -> Any: _lowerCAmelCase = torch.ones([0] ) def lowercase__ ( self : Optional[Any] , __snake_case : Tuple ) -> Dict: self.pixel_values.to(__snake_case ) return self return Out() return extract def lowercase__ ( self : List[str] ) -> Optional[int]: _lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase = self.dummy_cond_unet _lowerCAmelCase = PNDMScheduler(skip_prk_steps=__snake_case ) _lowerCAmelCase = self.dummy_vae _lowerCAmelCase = self.dummy_text_encoder _lowerCAmelCase = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" ) _lowerCAmelCase = 77 _lowerCAmelCase = self.dummy_image.to(__snake_case ) _lowerCAmelCase = init_image / 2 + 0.5 # make sure here that pndm scheduler skips prk _lowerCAmelCase = AltDiffusionImgaImgPipeline( unet=__snake_case , scheduler=__snake_case , vae=__snake_case , text_encoder=__snake_case , tokenizer=__snake_case , safety_checker=__snake_case , feature_extractor=self.dummy_extractor , ) _lowerCAmelCase = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__snake_case ) _lowerCAmelCase = alt_pipe.to(__snake_case ) 
alt_pipe.set_progress_bar_config(disable=__snake_case ) _lowerCAmelCase = """A painting of a squirrel eating a burger""" _lowerCAmelCase = torch.Generator(device=__snake_case ).manual_seed(0 ) _lowerCAmelCase = alt_pipe( [prompt] , generator=__snake_case , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=__snake_case , ) _lowerCAmelCase = output.images _lowerCAmelCase = torch.Generator(device=__snake_case ).manual_seed(0 ) _lowerCAmelCase = alt_pipe( [prompt] , generator=__snake_case , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=__snake_case , return_dict=__snake_case , )[0] _lowerCAmelCase = image[0, -3:, -3:, -1] _lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) _lowerCAmelCase = np.array([0.44_27, 0.37_31, 0.42_49, 0.49_41, 0.45_46, 0.41_48, 0.41_93, 0.46_66, 0.44_99] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3 @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" ) def lowercase__ ( self : Tuple ) -> str: _lowerCAmelCase = self.dummy_cond_unet _lowerCAmelCase = PNDMScheduler(skip_prk_steps=__snake_case ) _lowerCAmelCase = self.dummy_vae _lowerCAmelCase = self.dummy_text_encoder _lowerCAmelCase = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" ) _lowerCAmelCase = 77 _lowerCAmelCase = self.dummy_image.to(__snake_case ) # put models in fp16 _lowerCAmelCase = unet.half() _lowerCAmelCase = vae.half() _lowerCAmelCase = bert.half() # make sure here that pndm scheduler skips prk _lowerCAmelCase = AltDiffusionImgaImgPipeline( unet=__snake_case , scheduler=__snake_case , vae=__snake_case , text_encoder=__snake_case , tokenizer=__snake_case , safety_checker=__snake_case , feature_extractor=self.dummy_extractor , ) _lowerCAmelCase = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__snake_case ) _lowerCAmelCase = alt_pipe.to(__snake_case ) alt_pipe.set_progress_bar_config(disable=__snake_case ) _lowerCAmelCase = """A painting of a squirrel eating a burger""" _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = alt_pipe( [prompt] , generator=__snake_case , num_inference_steps=2 , output_type="""np""" , image=__snake_case , ).images assert image.shape == (1, 32, 32, 3) @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" ) def lowercase__ ( self : Optional[Any] ) -> Optional[Any]: _lowerCAmelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) # resize to resolution that is divisible by 8 but not 16 or 32 _lowerCAmelCase = init_image.resize((7_60, 5_04) ) _lowerCAmelCase = """BAAI/AltDiffusion""" _lowerCAmelCase = AltDiffusionImgaImgPipeline.from_pretrained( __snake_case , safety_checker=__snake_case , ) pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) pipe.enable_attention_slicing() _lowerCAmelCase = """A fantasy landscape, trending on artstation""" _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = pipe( prompt=__snake_case , image=__snake_case , strength=0.75 , guidance_scale=7.5 , generator=__snake_case , output_type="""np""" , ) _lowerCAmelCase = output.images[0] _lowerCAmelCase = image[2_55:2_58, 3_83:3_86, -1] assert image.shape == (5_04, 7_60, 3) _lowerCAmelCase = np.array([0.93_58, 0.93_97, 0.95_99, 0.99_01, 1.00_00, 1.00_00, 0.98_82, 1.00_00, 1.00_00] ) 
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class UpperCAmelCase ( unittest.TestCase ): def lowercase__ ( self : int ) -> Any: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : Dict ) -> Tuple: _lowerCAmelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) _lowerCAmelCase = init_image.resize((7_68, 5_12) ) _lowerCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy""" ) _lowerCAmelCase = """BAAI/AltDiffusion""" _lowerCAmelCase = AltDiffusionImgaImgPipeline.from_pretrained( __snake_case , safety_checker=__snake_case , ) pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) pipe.enable_attention_slicing() _lowerCAmelCase = """A fantasy landscape, trending on artstation""" _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = pipe( prompt=__snake_case , image=__snake_case , strength=0.75 , guidance_scale=7.5 , generator=__snake_case , output_type="""np""" , ) _lowerCAmelCase = output.images[0] assert image.shape == (5_12, 7_68, 3) # img2img is flaky across GPUs even in fp32, so using MAE here assert np.abs(expected_image - image ).max() < 1E-2
220
0
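The quantization-test row above trains a low-rank `LoRALayer` bypass on top of frozen quantized projections; below is a de-obfuscated sketch of that adapter pattern, assuming PyTorch. The class name `LoRAAdapter` is illustrative; the init scale mirrors the expression visible in the row.

import torch.nn as nn


class LoRAAdapter(nn.Module):
    # Wraps a (frozen) linear module with a trainable low-rank bypass:
    # y = module(x) + B(A(x)), where A maps in_features -> rank and B maps rank -> out_features.
    def __init__(self, module: nn.Linear, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)  # zero init: training starts from the base model's output
        self.adapter.to(module.weight.device)

    def forward(self, x, *args, **kwargs):
        return self.module(x, *args, **kwargs) + self.adapter(x)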
import copy import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE :Dict = logging.get_logger(__name__) SCREAMING_SNAKE_CASE :Optional[Any] = { 'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json', 'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json', 'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json', } class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = "owlvit_text_model" def __init__( self : Dict ,A : Any=4_94_08 ,A : Optional[Any]=5_12 ,A : Optional[Any]=20_48 ,A : int=12 ,A : List[Any]=8 ,A : str=16 ,A : Dict="quick_gelu" ,A : str=1E-5 ,A : Dict=0.0 ,A : str=0.02 ,A : List[Any]=1.0 ,A : List[Any]=0 ,A : int=4_94_06 ,A : List[Any]=4_94_07 ,**A : List[str] ,): super().__init__(pad_token_id=A ,bos_token_id=A ,eos_token_id=A ,**A ) __A = vocab_size __A = hidden_size __A = intermediate_size __A = num_hidden_layers __A = num_attention_heads __A = max_position_embeddings __A = hidden_act __A = layer_norm_eps __A = attention_dropout __A = initializer_range __A = initializer_factor @classmethod def UpperCamelCase_ ( cls : List[str] ,A : Union[str, os.PathLike] ,**A : Optional[Any] ): cls._set_token_in_kwargs(A ) __A , __A = cls.get_config_dict(A ,**A ) # get the text config dict if we are loading from OwlViTConfig if config_dict.get("model_type" ) == "owlvit": __A = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls ,"model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(A ,**A ) class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = "owlvit_vision_model" def __init__( self : List[Any] ,A : str=7_68 ,A : Tuple=30_72 ,A : List[str]=12 ,A : str=12 ,A : List[str]=3 ,A : List[str]=7_68 ,A : Optional[Any]=32 ,A : Optional[int]="quick_gelu" ,A : List[str]=1E-5 ,A : Optional[int]=0.0 ,A : Union[str, Any]=0.02 ,A : Optional[Any]=1.0 ,**A : Union[str, Any] ,): super().__init__(**A ) __A = hidden_size __A = intermediate_size __A = num_hidden_layers __A = num_attention_heads __A = num_channels __A = image_size __A = patch_size __A = hidden_act __A = layer_norm_eps __A = attention_dropout __A = initializer_range __A = initializer_factor @classmethod def UpperCamelCase_ ( cls : Union[str, Any] ,A : Union[str, os.PathLike] ,**A : List[Any] ): cls._set_token_in_kwargs(A ) __A , __A = cls.get_config_dict(A ,**A ) # get the vision config dict if we are loading from OwlViTConfig if config_dict.get("model_type" ) == "owlvit": __A = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls ,"model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(A ,**A ) class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = "owlvit" snake_case_ = True def __init__( self : Any ,A : List[Any]=None ,A : Tuple=None ,A : Optional[Any]=5_12 ,A : Tuple=2.65_92 ,A : Dict=True ,**A : Optional[Any] ,): super().__init__(**A ) if text_config is None: __A = {} logger.info("text_config is None. Initializing the OwlViTTextConfig with default values." ) if vision_config is None: __A = {} logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values." ) __A = OwlViTTextConfig(**A ) __A = OwlViTVisionConfig(**A ) __A = projection_dim __A = logit_scale_init_value __A = return_dict __A = 1.0 @classmethod def UpperCamelCase_ ( cls : List[Any] ,A : Union[str, os.PathLike] ,**A : List[Any] ): cls._set_token_in_kwargs(A ) __A , __A = cls.get_config_dict(A ,**A ) if "model_type" in config_dict and hasattr(cls ,"model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(A ,**A ) @classmethod def UpperCamelCase_ ( cls : Dict ,A : Dict ,A : Dict ,**A : Any ): __A = {} __A = text_config __A = vision_config return cls.from_dict(A ,**A ) def UpperCamelCase_ ( self : Any ): __A = copy.deepcopy(self.__dict__ ) __A = self.text_config.to_dict() __A = self.vision_config.to_dict() __A = self.__class__.model_type return output class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' @property def UpperCamelCase_ ( self : Dict ): return OrderedDict( [ ("input_ids", {0: "batch", 1: "sequence"}), ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("attention_mask", {0: "batch", 1: "sequence"}), ] ) @property def UpperCamelCase_ ( self : List[str] ): return OrderedDict( [ ("logits_per_image", {0: "batch"}), ("logits_per_text", {0: "batch"}), ("text_embeds", {0: "batch"}), ("image_embeds", {0: "batch"}), ] ) @property def UpperCamelCase_ ( self : Dict ): return 1E-4 def UpperCamelCase_ ( self : Union[str, Any] ,A : "ProcessorMixin" ,A : int = -1 ,A : int = -1 ,A : Optional["TensorType"] = None ,): __A = super().generate_dummy_inputs( processor.tokenizer ,batch_size=A ,seq_length=A ,framework=A ) __A = super().generate_dummy_inputs( processor.image_processor ,batch_size=A ,framework=A ) return {**text_input_dict, **image_input_dict} @property def UpperCamelCase_ ( self : Any ): return 14
15
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 UpperCAmelCase_ = get_tests_dir('fixtures') UpperCAmelCase_ = get_tests_dir('fixtures/dummy_feature_extractor_config.json') UpperCAmelCase_ = get_tests_dir('fixtures/dummy-config.json') class lowerCamelCase__( unittest.TestCase): def lowerCAmelCase__ ( self: List[str] ): __lowerCamelCase = 0 def lowerCAmelCase__ ( self: List[str] ): __lowerCamelCase = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) def lowerCAmelCase__ ( self: int ): with tempfile.TemporaryDirectory() as tmpdirname: __lowerCamelCase = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally __lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ ).to_dict() config_dict.pop("""feature_extractor_type""" ) __lowerCamelCase = WavaVecaFeatureExtractor(**UpperCamelCase_ ) # save in new folder model_config.save_pretrained(UpperCamelCase_ ) config.save_pretrained(UpperCamelCase_ ) __lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ ) # make sure private variable is not incorrectly saved __lowerCamelCase = json.loads(config.to_json_string() ) self.assertTrue("""_processor_class""" not in dict_as_saved ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) def lowerCAmelCase__ ( self: Tuple ): __lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) def lowerCAmelCase__ ( self: int ): with self.assertRaisesRegex( UpperCamelCase_ , """bert-base is not a local folder and is not a valid model identifier""" ): __lowerCamelCase = AutoFeatureExtractor.from_pretrained("""bert-base""" ) def lowerCAmelCase__ ( self: Tuple ): with self.assertRaisesRegex( UpperCamelCase_ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): __lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ , revision="""aaaaaa""" ) def lowerCAmelCase__ ( self: Optional[Any] ): with self.assertRaisesRegex( UpperCamelCase_ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ): __lowerCamelCase = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" ) def lowerCAmelCase__ ( self: Tuple ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(UpperCamelCase_ ): __lowerCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(UpperCamelCase_ ): __lowerCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=UpperCamelCase_ ) __lowerCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=UpperCamelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) # Test feature extractor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(UpperCamelCase_ ) __lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) def lowerCAmelCase__ ( self: Any ): try: AutoConfig.register("""custom""" , UpperCamelCase_ ) AutoFeatureExtractor.register(UpperCamelCase_ , UpperCamelCase_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(UpperCamelCase_ ): AutoFeatureExtractor.register(UpperCamelCase_ , UpperCamelCase_ ) # Now that the config is registered, it can be used as any other config with the auto-API __lowerCamelCase = CustomFeatureExtractor.from_pretrained(UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(UpperCamelCase_ ) __lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def lowerCAmelCase__ ( self: Dict ): class lowerCamelCase__( __lowerCamelCase): UpperCAmelCase__ : str = True try: AutoConfig.register("""custom""" , UpperCamelCase_ ) AutoFeatureExtractor.register(UpperCamelCase_ , UpperCamelCase_ ) # If remote code is not set, the default is to use local __lowerCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. __lowerCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=UpperCamelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub __lowerCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=UpperCamelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(not hasattr(UpperCamelCase_ , """is_local""" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
12
0
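The feature-extractor row above exercises registering a custom class with the Auto factories; below is a minimal sketch of that registration flow. The `CustomConfig`/`CustomFeatureExtractor` bodies are placeholder assumptions; only the two `register` calls appear in the source.

from transformers import AutoConfig, AutoFeatureExtractor, PretrainedConfig
from transformers.feature_extraction_utils import FeatureExtractionMixin


class CustomConfig(PretrainedConfig):
    model_type = "custom"


class CustomFeatureExtractor(FeatureExtractionMixin):
    pass


# Map the new config type to the new extractor so both Auto classes can resolve it.
AutoConfig.register("custom", CustomConfig)
AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)

# After registration, AutoFeatureExtractor.from_pretrained on a checkpoint whose
# preprocessor_config.json names CustomFeatureExtractor resolves to the class above.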
"""simple docstring""" UpperCAmelCase : List[Any] = range(2, 20 + 1) UpperCAmelCase : Optional[int] = [10**k for k in range(ks[-1] + 1)] UpperCAmelCase : dict[int, dict[int, list[list[int]]]] = {} def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[Any] ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : str = sum(a_i[j] for j in range(_UpperCamelCase , len(_UpperCamelCase ) ) ) __UpperCAmelCase : List[str] = sum(a_i[j] * base[j] for j in range(min(len(_UpperCamelCase ) , _UpperCamelCase ) ) ) __UpperCAmelCase : str = 0, 0 __UpperCAmelCase : Optional[int] = n - i __UpperCAmelCase : Optional[int] = memo.get(_UpperCamelCase ) if sub_memo is not None: __UpperCAmelCase : Any = sub_memo.get(_UpperCamelCase ) if jumps is not None and len(_UpperCamelCase ) > 0: # find and make the largest jump without going over __UpperCAmelCase : List[Any] = -1 for _k in range(len(_UpperCamelCase ) - 1 , -1 , -1 ): if jumps[_k][2] <= k and jumps[_k][1] <= max_dn: __UpperCAmelCase : List[Any] = _k break if max_jump >= 0: __UpperCAmelCase : Tuple = jumps[max_jump] # since the difference between jumps is cached, add c __UpperCAmelCase : Dict = diff + c for j in range(min(_UpperCamelCase , len(_UpperCamelCase ) ) ): __UpperCAmelCase : str = divmod(_UpperCamelCase , 1_0 ) if new_c > 0: add(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) else: __UpperCAmelCase : str = [] else: __UpperCAmelCase : Dict = {c: []} __UpperCAmelCase : Union[str, Any] = sub_memo if dn >= max_dn or c + diff >= base[k]: return diff, dn if k > ks[0]: while True: # keep doing smaller jumps __UpperCAmelCase : Union[str, Any] = next_term(_UpperCamelCase , k - 1 , i + dn , _UpperCamelCase ) diff += _diff dn += terms_jumped if dn >= max_dn or c + diff >= base[k]: break else: # would be too small a jump, just compute sequential terms instead __UpperCAmelCase : Dict = compute(_UpperCamelCase , _UpperCamelCase , i + dn , _UpperCamelCase ) diff += _diff dn += terms_jumped __UpperCAmelCase : Union[str, Any] = sub_memo[c] # keep jumps sorted by # of terms skipped __UpperCAmelCase : Optional[int] = 0 while j < len(_UpperCamelCase ): if jumps[j][1] > dn: break j += 1 # cache the jump for this value digitsum(b) and c sub_memo[c].insert(_UpperCamelCase , (diff, dn, k) ) return (diff, dn) def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any ) -> Dict: '''simple docstring''' if i >= n: return 0, i if k > len(_UpperCamelCase ): a_i.extend([0 for _ in range(k - len(_UpperCamelCase ) )] ) # note: a_i -> b * 10^k + c # ds_b -> digitsum(b) # ds_c -> digitsum(c) __UpperCAmelCase : Dict = i __UpperCAmelCase : List[Any] = 0, 0, 0 for j in range(len(_UpperCamelCase ) ): if j >= k: ds_b += a_i[j] else: ds_c += a_i[j] while i < n: i += 1 __UpperCAmelCase : Union[str, Any] = ds_c + ds_b diff += addend __UpperCAmelCase : Tuple = 0 for j in range(_UpperCamelCase ): __UpperCAmelCase : int = a_i[j] + addend __UpperCAmelCase : str = divmod(_UpperCamelCase , 1_0 ) ds_c += a_i[j] if addend > 0: break if addend > 0: add(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) return diff, i - start_i def lowerCamelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Dict , _UpperCamelCase : Tuple ) -> Dict: '''simple docstring''' for j in range(_UpperCamelCase , len(_UpperCamelCase ) ): __UpperCAmelCase : Optional[Any] = digits[j] + addend if s >= 1_0: __UpperCAmelCase : List[Any] = divmod(_UpperCamelCase , 
1_0 ) __UpperCAmelCase : Union[str, Any] = addend // 1_0 + quotient else: __UpperCAmelCase : List[str] = s __UpperCAmelCase : Optional[int] = addend // 1_0 if addend == 0: break while addend > 0: __UpperCAmelCase : str = divmod(_UpperCamelCase , 1_0 ) digits.append(_UpperCamelCase ) def lowerCamelCase ( _UpperCamelCase : int = 1_0**1_5 ) -> int: '''simple docstring''' __UpperCAmelCase : str = [1] __UpperCAmelCase : List[Any] = 1 __UpperCAmelCase : Optional[Any] = 0 while True: __UpperCAmelCase : List[Any] = next_term(_UpperCamelCase , 2_0 , i + dn , _UpperCamelCase ) dn += terms_jumped if dn == n - i: break __UpperCAmelCase : List[Any] = 0 for j in range(len(_UpperCamelCase ) ): a_n += digits[j] * 1_0**j return a_n if __name__ == "__main__": print(F"{solution() = }")
356
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase : Tuple = { 'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'], 'tokenization_electra': ['ElectraTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : List[Any] = ['ElectraTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Any = [ 'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'ElectraForCausalLM', 'ElectraForMaskedLM', 'ElectraForMultipleChoice', 'ElectraForPreTraining', 'ElectraForQuestionAnswering', 'ElectraForSequenceClassification', 'ElectraForTokenClassification', 'ElectraModel', 'ElectraPreTrainedModel', 'load_tf_weights_in_electra', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Optional[Any] = [ 'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFElectraForMaskedLM', 'TFElectraForMultipleChoice', 'TFElectraForPreTraining', 'TFElectraForQuestionAnswering', 'TFElectraForSequenceClassification', 'TFElectraForTokenClassification', 'TFElectraModel', 'TFElectraPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = [ 'FlaxElectraForCausalLM', 'FlaxElectraForMaskedLM', 'FlaxElectraForMultipleChoice', 'FlaxElectraForPreTraining', 'FlaxElectraForQuestionAnswering', 'FlaxElectraForSequenceClassification', 'FlaxElectraForTokenClassification', 'FlaxElectraModel', 'FlaxElectraPreTrainedModel', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys 
UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
0
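The Electra __init__ above defers all heavy imports through transformers' internal _LazyModule helper: a submodule is only imported the first time one of its symbols is accessed. A minimal standard-library sketch of the same idea (this LazyModule class is illustrative, not transformers' actual implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    """Module proxy that imports a submodule on first access to one of its symbols."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }
        self.__all__ = list(self._symbol_to_module)

    def __getattr__(self, name: str):
        # __getattr__ is only called for attributes not found normally
        module_map = self.__dict__.get("_symbol_to_module", {})
        if name not in module_map:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        submodule = importlib.import_module(f".{module_map[name]}", self.__name__)
        value = getattr(submodule, name)
        setattr(self, name, value)  # cache so later lookups skip __getattr__
        return value

# a package __init__ would then install it with:
#   sys.modules[__name__] = LazyModule(__name__, _import_structure)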
"""simple docstring""" import math UpperCamelCase : Union[str, Any] = 1_0 UpperCamelCase : List[str] = 7 UpperCamelCase : Any = BALLS_PER_COLOUR * NUM_COLOURS def A ( snake_case :int = 2_0 ) -> str: __UpperCamelCase = math.comb(snake_case , snake_case ) __UpperCamelCase = math.comb(NUM_BALLS - BALLS_PER_COLOUR , snake_case ) __UpperCamelCase = NUM_COLOURS * (1 - missing_colour / total) return f'{result:.9f}' if __name__ == "__main__": print(solution(2_0))
316
"""simple docstring""" import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = ["image_processor", "tokenizer"] lowercase = "OwlViTImageProcessor" lowercase = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , __UpperCAmelCase , ) __UpperCamelCase = kwargs.pop('feature_extractor' ) __UpperCamelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(__UpperCAmelCase , __UpperCAmelCase ) def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="max_length" , __UpperCAmelCase="np" , **__UpperCAmelCase ): '''simple docstring''' if text is None and query_images is None and images is None: raise ValueError( 'You have to specify at least one text or query image or image. All three cannot be none.' ) if text is not None: if isinstance(__UpperCAmelCase , __UpperCAmelCase ) or (isinstance(__UpperCAmelCase , __UpperCAmelCase ) and not isinstance(text[0] , __UpperCAmelCase )): __UpperCamelCase = [self.tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )] elif isinstance(__UpperCAmelCase , __UpperCAmelCase ) and isinstance(text[0] , __UpperCAmelCase ): __UpperCamelCase = [] # Maximum number of queries across batch __UpperCamelCase = max([len(__UpperCAmelCase ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(__UpperCAmelCase ) != max_num_queries: __UpperCamelCase = t + [' '] * (max_num_queries - len(__UpperCAmelCase )) __UpperCamelCase = self.tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) encodings.append(__UpperCAmelCase ) else: raise TypeError('Input text should be a string, a list of strings or a nested list of strings' ) if return_tensors == "np": __UpperCamelCase = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 ) __UpperCamelCase = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp __UpperCamelCase = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 ) __UpperCamelCase = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch __UpperCamelCase = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 ) __UpperCamelCase = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf __UpperCamelCase = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 ) __UpperCamelCase = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 ) else: raise ValueError('Target return tensor type could 
not be returned' ) __UpperCamelCase = BatchEncoding() __UpperCamelCase = input_ids __UpperCamelCase = attention_mask if query_images is not None: __UpperCamelCase = BatchEncoding() __UpperCamelCase = self.image_processor( __UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ).pixel_values __UpperCamelCase = query_pixel_values if images is not None: __UpperCamelCase = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if text is not None and images is not None: __UpperCamelCase = image_features.pixel_values return encoding elif query_images is not None and images is not None: __UpperCamelCase = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase ) def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return self.image_processor.post_process(*__UpperCAmelCase , **__UpperCAmelCase ) def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return self.image_processor.post_process_object_detection(*__UpperCAmelCase , **__UpperCAmelCase ) def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return self.image_processor.post_process_image_guided_detection(*__UpperCAmelCase , **__UpperCAmelCase ) def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase ) def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase ) @property def UpperCAmelCase ( self ): '''simple docstring''' warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __UpperCAmelCase , ) return self.image_processor_class @property def UpperCAmelCase ( self ): '''simple docstring''' warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __UpperCAmelCase , ) return self.image_processor
316
1
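A quick numeric check of the linearity-of-expectation argument the first sample in this record encodes (70 balls, 7 colours, 10 balls per colour, 20 drawn): each colour is absent from the draw with probability C(60, 20) / C(70, 20), so the expected number of distinct colours is 7 times the complement.

import math

# probability that one fixed colour is entirely missing from the 20 drawn
missing = math.comb(60, 20) / math.comb(70, 20)
expected_colours = 7 * (1 - missing)
print(f"{expected_colours:.9f}")  # 6.818741802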
"""simple docstring""" lowerCAmelCase_ : List[Any] = 9.80665 def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = g ): '''simple docstring''' if fluid_density <= 0: raise ValueError("""Impossible fluid density""" ) if volume < 0: raise ValueError("""Impossible Object volume""" ) if gravity <= 0: raise ValueError("""Impossible Gravity""" ) return fluid_density * gravity * volume if __name__ == "__main__": import doctest # run doctest doctest.testmod()
248
"""simple docstring""" import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class UpperCamelCase_ ( a_ ): def __init__( self , *snake_case__ , snake_case__=None , snake_case__=None , **snake_case__ ) -> Optional[Any]: """simple docstring""" super().__init__(*snake_case__ , **snake_case__ ) UpperCAmelCase = eval_examples UpperCAmelCase = post_process_function def UpperCamelCase_ ( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__ = "eval" ) -> List[Any]: """simple docstring""" UpperCAmelCase = self.eval_dataset if eval_dataset is None else eval_dataset UpperCAmelCase = self.get_eval_dataloader(snake_case__ ) UpperCAmelCase = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. UpperCAmelCase = self.compute_metrics UpperCAmelCase = None UpperCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop UpperCAmelCase = time.time() try: UpperCAmelCase = eval_loop( snake_case__ , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=snake_case__ , metric_key_prefix=snake_case__ , ) finally: UpperCAmelCase = compute_metrics UpperCAmelCase = self.args.eval_batch_size * self.args.world_size if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( snake_case__ , snake_case__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default UpperCAmelCase = self.post_process_function(snake_case__ , snake_case__ , output.predictions ) UpperCAmelCase = self.compute_metrics(snake_case__ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f'''{metric_key_prefix}_''' ): UpperCAmelCase = metrics.pop(snake_case__ ) metrics.update(output.metrics ) else: UpperCAmelCase = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(snake_case__ ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) UpperCAmelCase = self.callback_handler.on_evaluate(self.args , self.state , self.control , snake_case__ ) return metrics def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__=None , snake_case__ = "test" ) -> Dict: """simple docstring""" UpperCAmelCase = self.get_test_dataloader(snake_case__ ) # Temporarily disable metric computation, we will do it in the loop here. 
UpperCAmelCase = self.compute_metrics UpperCAmelCase = None UpperCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop UpperCAmelCase = time.time() try: UpperCAmelCase = eval_loop( snake_case__ , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=snake_case__ , metric_key_prefix=snake_case__ , ) finally: UpperCAmelCase = compute_metrics UpperCAmelCase = self.args.eval_batch_size * self.args.world_size if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( snake_case__ , snake_case__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output UpperCAmelCase = self.post_process_function(snake_case__ , snake_case__ , output.predictions , """predict""" ) UpperCAmelCase = self.compute_metrics(snake_case__ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f'''{metric_key_prefix}_''' ): UpperCAmelCase = metrics.pop(snake_case__ ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=snake_case__ )
248
1
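Example use of the buoyancy helper above (names follow the cleaned-up signature; the water density is an illustrative round value): the buoyant force on 2 m^3 fully submerged in fresh water.

water_density = 1000.0  # kg/m^3, illustrative
submerged_volume = 2.0  # m^3
force = archimedes_principle(water_density, submerged_volume)  # gravity defaults to 9.80665
print(f"{force:.2f} N")  # 19613.30 N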
'''simple docstring'''


def manhattan_distance(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
55
"""simple docstring""" import argparse import pytorch_lightning as pl import torch from torch import nn from transformers import LongformerForQuestionAnswering, LongformerModel class _UpperCamelCase ( pl.LightningModule ): '''simple docstring''' def __init__( self , __a ): super().__init__() __lowerCAmelCase = model __lowerCAmelCase = 2 __lowerCAmelCase = nn.Linear(self.model.config.hidden_size , self.num_labels ) def snake_case ( self ): pass def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = LongformerModel.from_pretrained(_UpperCamelCase ) __lowerCAmelCase = LightningModel(_UpperCamelCase ) __lowerCAmelCase = torch.load(_UpperCamelCase , map_location=torch.device("cpu" ) ) lightning_model.load_state_dict(ckpt["state_dict"] ) # init longformer question answering model __lowerCAmelCase = LongformerForQuestionAnswering.from_pretrained(_UpperCamelCase ) # transfer weights longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() ) longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() ) longformer_for_qa.eval() # save model longformer_for_qa.save_pretrained(_UpperCamelCase ) print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}" ) if __name__ == "__main__": A : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( "--longformer_model", default=None, type=str, required=True, help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.", ) parser.add_argument( "--longformer_question_answering_ckpt_path", default=None, type=str, required=True, help="Path the official PyTorch Lightning Checkpoint.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) A : Optional[int] = parser.parse_args() convert_longformer_qa_checkpoint_to_pytorch( args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path )
57
0
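The two distance helpers in the sample above are interchangeable; a quick sanity check (using the de-obfuscated names from the cleaned-up version):

assert manhattan_distance([1, 1], [9, 9]) == 16.0
assert manhattan_distance_one_liner([1.5, 2], [-1, 2.5]) == 3.0
assert manhattan_distance([0, 0, 0], [1, -2, 3]) == 6.0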
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) if is_vision_available(): import PIL class a ( UpperCAmelCase ): _lowercase = ["pixel_values"] def __init__( self , A_ = True , A_ = None , A_ = PILImageResampling.BICUBIC , A_ = True , A_ = None , A_ = True , A_ = 1 / 255 , A_ = True , A_ = None , A_ = None , A_ = True , **A_ , ): '''simple docstring''' super().__init__(**A_ ) _UpperCAmelCase : List[str] = size if size is not None else {"shortest_edge": 224} _UpperCAmelCase : Dict = get_size_dict(A_ , default_to_square=A_ ) _UpperCAmelCase : Optional[int] = crop_size if crop_size is not None else {"height": 224, "width": 224} _UpperCAmelCase : Dict = get_size_dict(A_ , default_to_square=A_ , param_name="crop_size" ) _UpperCAmelCase : str = do_resize _UpperCAmelCase : Dict = size _UpperCAmelCase : Union[str, Any] = resample _UpperCAmelCase : Optional[int] = do_center_crop _UpperCAmelCase : Optional[int] = crop_size _UpperCAmelCase : List[str] = do_rescale _UpperCAmelCase : Union[str, Any] = rescale_factor _UpperCAmelCase : Optional[Any] = do_normalize _UpperCAmelCase : Dict = image_mean if image_mean is not None else OPENAI_CLIP_MEAN _UpperCAmelCase : Optional[Any] = image_std if image_std is not None else OPENAI_CLIP_STD _UpperCAmelCase : Tuple = do_convert_rgb def _UpperCAmelCase ( self , A_ , A_ , A_ = PILImageResampling.BICUBIC , A_ = None , **A_ , ): '''simple docstring''' _UpperCAmelCase : int = get_size_dict(A_ , default_to_square=A_ ) if "shortest_edge" not in size: raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) _UpperCAmelCase : Optional[int] = get_resize_output_image_size(A_ , size=size["shortest_edge"] , default_to_square=A_ ) return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_ ) def _UpperCAmelCase ( self , A_ , A_ , A_ = None , **A_ , ): '''simple docstring''' _UpperCAmelCase : str = get_size_dict(A_ ) if "height" not in size or "width" not in size: raise ValueError(f'The `size` parameter must contain the keys (height, width). 
Got {size.keys()}' ) return center_crop(A_ , size=(size["height"], size["width"]) , data_format=A_ , **A_ ) def _UpperCAmelCase ( self , A_ , A_ , A_ = None , **A_ , ): '''simple docstring''' return rescale(A_ , scale=A_ , data_format=A_ , **A_ ) def _UpperCAmelCase ( self , A_ , A_ , A_ , A_ = None , **A_ , ): '''simple docstring''' return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ ) def _UpperCAmelCase ( self , A_ , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = ChannelDimension.FIRST , **A_ , ): '''simple docstring''' _UpperCAmelCase : List[Any] = do_resize if do_resize is not None else self.do_resize _UpperCAmelCase : int = size if size is not None else self.size _UpperCAmelCase : Optional[Any] = get_size_dict(A_ , param_name="size" , default_to_square=A_ ) _UpperCAmelCase : Tuple = resample if resample is not None else self.resample _UpperCAmelCase : Any = do_center_crop if do_center_crop is not None else self.do_center_crop _UpperCAmelCase : str = crop_size if crop_size is not None else self.crop_size _UpperCAmelCase : Optional[int] = get_size_dict(A_ , param_name="crop_size" , default_to_square=A_ ) _UpperCAmelCase : Dict = do_rescale if do_rescale is not None else self.do_rescale _UpperCAmelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor _UpperCAmelCase : Dict = do_normalize if do_normalize is not None else self.do_normalize _UpperCAmelCase : Tuple = image_mean if image_mean is not None else self.image_mean _UpperCAmelCase : int = image_std if image_std is not None else self.image_std _UpperCAmelCase : int = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb _UpperCAmelCase : List[str] = make_list_of_images(A_ ) if not valid_images(A_ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # PIL RGBA images are converted to RGB if do_convert_rgb: _UpperCAmelCase : Optional[Any] = [convert_to_rgb(A_ ) for image in images] # All transformations expect numpy arrays. _UpperCAmelCase : Optional[Any] = [to_numpy_array(A_ ) for image in images] if do_resize: _UpperCAmelCase : Optional[int] = [self.resize(image=A_ , size=A_ , resample=A_ ) for image in images] if do_center_crop: _UpperCAmelCase : Any = [self.center_crop(image=A_ , size=A_ ) for image in images] if do_rescale: _UpperCAmelCase : Optional[Any] = [self.rescale(image=A_ , scale=A_ ) for image in images] if do_normalize: _UpperCAmelCase : str = [self.normalize(image=A_ , mean=A_ , std=A_ ) for image in images] _UpperCAmelCase : Optional[int] = [to_channel_dimension_format(A_ , A_ ) for image in images] _UpperCAmelCase : List[Any] = {"pixel_values": images} return BatchFeature(data=A_ , tensor_type=A_ )
189
import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.17.0.dev0') require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt') SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__) @dataclass class a : _lowercase = field( default="tab_fact" , metadata={"help": "The name of the dataset to use (via the datasets library)."} ) _lowercase = field( default="tab_fact" , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} , ) _lowercase = field( default=1_0_2_4 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) _lowercase = field( default=UpperCAmelCase , metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) _lowercase = field( default=UpperCAmelCase , metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) } , ) _lowercase = field( default=UpperCAmelCase , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) _lowercase = field( default=UpperCAmelCase , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } , ) _lowercase = field( default=UpperCAmelCase , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) } , ) _lowercase = field( default=UpperCAmelCase , metadata={"help": "A csv or a json file containing the training data."} ) _lowercase = field( default=UpperCAmelCase , metadata={"help": "A csv or a json file containing the validation data."} ) _lowercase = field(default=UpperCAmelCase , metadata={"help": "A csv or a json file containing the test data."} ) def _UpperCAmelCase ( self ): '''simple docstring''' if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError("Need either a GLUE task, a training/validation file or a dataset name." ) else: _UpperCAmelCase : int = self.train_file.split("." )[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." _UpperCAmelCase : Optional[int] = self.validation_file.split("." )[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." 
@dataclass class a : _lowercase = field( default=UpperCAmelCase , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) _lowercase = field( default=UpperCAmelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) _lowercase = field( default=UpperCAmelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) _lowercase = field( default=UpperCAmelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) _lowercase = field( default=UpperCAmelCase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , ) _lowercase = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) _lowercase = field( default=UpperCAmelCase , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) def __SCREAMING_SNAKE_CASE ( ) -> List[str]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _UpperCAmelCase : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) _UpperCAmelCase : Tuple = training_args.get_process_log_level() logger.setLevel(lowerCAmelCase ) datasets.utils.logging.set_verbosity(lowerCAmelCase ) transformers.utils.logging.set_verbosity(lowerCAmelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(F'Training/evaluation parameters {training_args}' ) # Detecting last checkpoint. _UpperCAmelCase : List[str] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _UpperCAmelCase : Tuple = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. ' "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. 
set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. _UpperCAmelCase : Tuple = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. _UpperCAmelCase : int = {"train": data_args.train_file, "validation": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: _UpperCAmelCase : Tuple = data_args.train_file.split("." )[-1] _UpperCAmelCase : Optional[Any] = data_args.test_file.split("." )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." _UpperCAmelCase : Union[str, Any] = data_args.test_file else: raise ValueError("Need either a GLUE task or a test file for `do_predict`." ) for key in data_files.keys(): logger.info(F'load a local file for {key}: {data_files[key]}' ) if data_args.train_file.endswith(".csv" ): # Loading a dataset from local csv files _UpperCAmelCase : List[str] = load_dataset("csv" , data_files=lowerCAmelCase , cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files _UpperCAmelCase : Dict = load_dataset("json" , data_files=lowerCAmelCase , cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels _UpperCAmelCase : List[str] = raw_datasets["train"].features["label"].names _UpperCAmelCase : Tuple = len(lowerCAmelCase ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # load tapex tokenizer _UpperCAmelCase : Tuple = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowerCAmelCase , ) _UpperCAmelCase : Union[str, Any] = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Padding strategy if data_args.pad_to_max_length: _UpperCAmelCase : Tuple = "max_length" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch _UpperCAmelCase : List[Any] = False # Some models have set the order of the labels to use, so let's make sure we do use it. _UpperCAmelCase : List[Any] = {"Refused": 0, "Entailed": 1} _UpperCAmelCase : Optional[int] = {0: "Refused", 1: "Entailed"} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( F'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the' F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' ) _UpperCAmelCase : int = min(data_args.max_seq_length , tokenizer.model_max_length ) def preprocess_tabfact_function(lowerCAmelCase: str ): # Tokenize the texts def _convert_table_text_to_pandas(lowerCAmelCase: List[Any] ): _UpperCAmelCase : Any = [_table_row.split("#" ) for _table_row in _table_text.strip("\n" ).split("\n" )] _UpperCAmelCase : List[str] = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] ) return _table_pd _UpperCAmelCase : Tuple = examples["statement"] _UpperCAmelCase : List[Any] = list(map(_convert_table_text_to_pandas , examples["table_text"] ) ) _UpperCAmelCase : Dict = tokenizer(lowerCAmelCase , lowerCAmelCase , padding=lowerCAmelCase , max_length=lowerCAmelCase , truncation=lowerCAmelCase ) _UpperCAmelCase : List[str] = examples["label"] return result with training_args.main_process_first(desc="dataset map pre-processing" ): _UpperCAmelCase : List[Any] = raw_datasets.map( lowerCAmelCase , batched=lowerCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on dataset" , ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset" ) _UpperCAmelCase : Dict = raw_datasets["train"] if data_args.max_train_samples is not None: _UpperCAmelCase : List[Any] = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset" ) _UpperCAmelCase : Union[str, Any] = raw_datasets["validation"] if data_args.max_eval_samples is not None: _UpperCAmelCase : Any = eval_dataset.select(range(data_args.max_eval_samples ) ) if training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets and 
"test_matched" not in raw_datasets: raise ValueError("--do_predict requires a test dataset" ) _UpperCAmelCase : Dict = raw_datasets["test"] if data_args.max_predict_samples is not None: _UpperCAmelCase : Any = predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(lowerCAmelCase ) ) , 3 ): logger.info(F'Sample {index} of the training set: {train_dataset[index]}.' ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(lowerCAmelCase: EvalPrediction ): _UpperCAmelCase : Optional[int] = p.predictions[0] if isinstance(p.predictions , lowerCAmelCase ) else p.predictions _UpperCAmelCase : Optional[Any] = np.argmax(lowerCAmelCase , axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: _UpperCAmelCase : str = default_data_collator elif training_args.fpaa: _UpperCAmelCase : int = DataCollatorWithPadding(lowerCAmelCase , pad_to_multiple_of=8 ) else: _UpperCAmelCase : List[str] = None # Initialize our Trainer _UpperCAmelCase : List[Any] = Trainer( model=lowerCAmelCase , args=lowerCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowerCAmelCase , tokenizer=lowerCAmelCase , data_collator=lowerCAmelCase , ) # Training if training_args.do_train: _UpperCAmelCase : List[Any] = None if training_args.resume_from_checkpoint is not None: _UpperCAmelCase : List[str] = training_args.resume_from_checkpoint elif last_checkpoint is not None: _UpperCAmelCase : Dict = last_checkpoint _UpperCAmelCase : str = trainer.train(resume_from_checkpoint=lowerCAmelCase ) _UpperCAmelCase : Tuple = train_result.metrics _UpperCAmelCase : Optional[int] = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase ) ) _UpperCAmelCase : Any = min(lowerCAmelCase , len(lowerCAmelCase ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("train" , lowerCAmelCase ) trainer.save_metrics("train" , lowerCAmelCase ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) _UpperCAmelCase : Optional[int] = trainer.evaluate(eval_dataset=lowerCAmelCase ) _UpperCAmelCase : Optional[Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCAmelCase ) _UpperCAmelCase : Any = min(lowerCAmelCase , len(lowerCAmelCase ) ) trainer.log_metrics("eval" , lowerCAmelCase ) trainer.save_metrics("eval" , lowerCAmelCase ) if training_args.do_predict: logger.info("*** Predict ***" ) # Removing the `label` columns because it contains -1 and Trainer won't like that. 
_UpperCAmelCase : int = predict_dataset.remove_columns("label" ) _UpperCAmelCase : Any = trainer.predict(lowerCAmelCase , metric_key_prefix="predict" ).predictions _UpperCAmelCase : List[str] = np.argmax(lowerCAmelCase , axis=1 ) _UpperCAmelCase : int = os.path.join(training_args.output_dir , "predict_results_tabfact.txt" ) if trainer.is_world_process_zero(): with open(lowerCAmelCase , "w" ) as writer: logger.info("***** Predict Results *****" ) writer.write("index\tprediction\n" ) for index, item in enumerate(lowerCAmelCase ): _UpperCAmelCase : List[Any] = label_list[item] writer.write(F'{index}\t{item}\n' ) _UpperCAmelCase : int = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"} if training_args.push_to_hub: trainer.push_to_hub(**lowerCAmelCase ) else: trainer.create_model_card(**lowerCAmelCase ) def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Dict ) -> Union[str, Any]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
189
1
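The image processor in the record above always applies its transforms in a fixed order: convert to RGB, resize the short edge, center crop, rescale to [0, 1], normalize per channel, then move channels first. A plain-NumPy sketch of the numeric steps (the mean/std values are the OpenAI CLIP constants the class defaults to; the input array is made up):

import numpy as np

OPENAI_CLIP_MEAN = np.array([0.48145466, 0.4578275, 0.40821073])
OPENAI_CLIP_STD = np.array([0.26862954, 0.26130258, 0.27577711])

image = np.random.randint(0, 256, (224, 224, 3)).astype(np.float32)  # pretend already resized/cropped
image = image * (1 / 255)                              # rescale_factor = 1/255
image = (image - OPENAI_CLIP_MEAN) / OPENAI_CLIP_STD   # per-channel normalize
image = image.transpose(2, 0, 1)                       # HWC -> CHW (ChannelDimension.FIRST)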
"""simple docstring""" import argparse import dataclasses import json import logging import os import shutil from typing import List, Optional import datasets from accelerate import Accelerator from datasets import load_dataset from finetuning import finetune from tqdm.auto import tqdm import transformers from transformers import AutoConfig, set_seed from transformers.trainer_utils import IntervalStrategy lowerCAmelCase__ : List[Any] = logging.getLogger(__name__) lowerCAmelCase__ : List[Any] = '''pytorch_model.bin''' @dataclasses.dataclass class snake_case : """simple docstring""" snake_case__ = dataclasses.field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} ) snake_case__ = dataclasses.field( default=_lowercase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , ) @dataclasses.dataclass class snake_case : """simple docstring""" snake_case__ = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} ) snake_case__ = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} ) snake_case__ = dataclasses.field( default=_lowercase , metadata={"help": "A csv or a json file containing the validation data."} ) snake_case__ = dataclasses.field( default=_lowercase , metadata={"help": "The name of the task to train on."} , ) snake_case__ = dataclasses.field( default=_lowercase , metadata={"help": "The list of labels for the task."} ) @dataclasses.dataclass class snake_case : """simple docstring""" snake_case__ = dataclasses.field( metadata={"help": "The output directory where the model predictions and checkpoints will be written."} ) snake_case__ = dataclasses.field( default="accuracy" , metadata={"help": "The evaluation metric used for the task."} ) snake_case__ = dataclasses.field( default="no" , metadata={ "help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]" } , ) snake_case__ = dataclasses.field( default=10 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , ) snake_case__ = dataclasses.field( default=0.0 , metadata={ "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions." 
} , ) snake_case__ = dataclasses.field( default=_lowercase , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , ) snake_case__ = dataclasses.field( default=_lowercase , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , ) snake_case__ = dataclasses.field( default=_lowercase , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , ) snake_case__ = dataclasses.field( default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , ) snake_case__ = dataclasses.field( default=1_00 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , ) snake_case__ = dataclasses.field( default=_lowercase , metadata={"help": "Random seed for initialization."} , ) def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): UpperCAmelCase__ = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 ) if args.do_filter_by_confidence: UpperCAmelCase__ = dataset.filter(lambda lowerCamelCase : example["probability"] > args.confidence_threshold ) if args.do_filter_by_val_performance: assert eval_result >= 0.0 and eval_result <= 1.0 UpperCAmelCase__ = int(eval_result * len(_lowercase ) ) print(_lowercase ) UpperCAmelCase__ = dataset.sort('probability' , reverse=_lowercase ) UpperCAmelCase__ = dataset.select(range(_lowercase ) ) UpperCAmelCase__ = dataset.remove_columns(['label', 'probability'] ) UpperCAmelCase__ = dataset.rename_column('prediction' , 'label' ) UpperCAmelCase__ = dataset.map(lambda lowerCamelCase : {"label": idalabel[example["label"]]} ) UpperCAmelCase__ = dataset.shuffle(seed=args.seed ) UpperCAmelCase__ = os.path.join(_lowercase , f'''train_pseudo.{args.data_file_extension}''' ) if args.data_file_extension == "csv": dataset.to_csv(_lowercase , index=_lowercase ) else: dataset.to_json(_lowercase ) def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ): UpperCAmelCase__ = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , ) logger.info(accelerator.state ) # Setup logging, we only want one process per machine to log things on the # screen. accelerator.is_local_main_process is only True for one process per # machine. 
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() UpperCAmelCase__ = STModelArguments(model_name_or_path=_lowercase ) UpperCAmelCase__ = STDataArguments(train_file=_lowercase , infer_file=_lowercase ) UpperCAmelCase__ = STTrainingArguments(output_dir=_lowercase ) UpperCAmelCase__ = argparse.Namespace() for arg_class in (model_args, data_args, training_args): for key, value in vars(_lowercase ).items(): setattr(_lowercase , _lowercase , _lowercase ) for key, value in kwargs.items(): if hasattr(_lowercase , _lowercase ): setattr(_lowercase , _lowercase , _lowercase ) # Sanity checks UpperCAmelCase__ = {} UpperCAmelCase__ = None # You need to provide the training data and the data to predict on assert args.train_file is not None assert args.infer_file is not None UpperCAmelCase__ = args.train_file UpperCAmelCase__ = args.infer_file if args.evaluation_strategy != IntervalStrategy.NO.value: assert args.eval_file is not None UpperCAmelCase__ = args.eval_file for key in data_files: UpperCAmelCase__ = data_files[key].split('.' )[-1] assert extension in ["csv", "json"], f'''`{key}_file` should be a csv or a json file.''' if args.data_file_extension is None: UpperCAmelCase__ = extension else: assert extension == args.data_file_extension, f'''`{key}_file` should be a {args.data_file_extension} file`.''' assert ( args.eval_metric in datasets.list_metrics() ), f'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.''' # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed ) logger.info('Creating the initial data directory for self-training...' 
) UpperCAmelCase__ = f'''{args.output_dir}/self-train_iter-{{}}'''.format UpperCAmelCase__ = data_dir_format(0 ) if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir , exist_ok=_lowercase ) os.makedirs(_lowercase , exist_ok=_lowercase ) accelerator.wait_for_everyone() UpperCAmelCase__ = None UpperCAmelCase__ = None UpperCAmelCase__ = 0 UpperCAmelCase__ = False # Show the progress bar UpperCAmelCase__ = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process ) # Self-train for iteration in range(0 , int(args.max_selftrain_iterations ) ): UpperCAmelCase__ = data_dir_format(_lowercase ) assert os.path.exists(_lowercase ) # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for # iteration > 0 UpperCAmelCase__ = os.path.join(_lowercase , 'stage-1' ) UpperCAmelCase__ = { '''accelerator''': accelerator, '''model_name_or_path''': args.model_name_or_path, '''cache_dir''': args.cache_dir, '''do_train''': True, '''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''], '''do_eval''': True if args.eval_file is not None else False, '''eval_file''': data_files['''eval'''], '''do_predict''': True, '''infer_file''': data_files['''infer'''], '''task_name''': args.task_name, '''label_list''': args.label_list, '''output_dir''': current_output_dir, '''eval_metric''': args.eval_metric, '''evaluation_strategy''': args.evaluation_strategy, '''early_stopping_patience''': args.early_stopping_patience, '''early_stopping_threshold''': args.early_stopping_threshold, '''seed''': args.seed, } # Add additional training arguments for key, value in kwargs.items(): if key not in arguments_dict and not hasattr(_lowercase , _lowercase ): arguments_dict.update({key: value} ) UpperCAmelCase__ = os.path.join(_lowercase , 'best-checkpoint' , _lowercase ) if os.path.exists(_lowercase ): logger.info( 'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.' , _lowercase , _lowercase , ) else: logger.info('***** Running self-training: iteration: %d, stage: 1 *****' , _lowercase ) finetune(**_lowercase ) accelerator.wait_for_everyone() assert os.path.exists(_lowercase ) logger.info('Self-training job completed: iteration: %d, stage: 1.' , _lowercase ) if iteration > 0 and args.finetune_on_labeled_data: # Stage 2 (optional): fine-tuning on the original labeled data UpperCAmelCase__ = os.path.join(_lowercase , 'best-checkpoint' ) UpperCAmelCase__ = os.path.join(_lowercase , 'stage-2' ) # Update arguments_dict UpperCAmelCase__ = model_path UpperCAmelCase__ = data_files['''train'''] UpperCAmelCase__ = current_output_dir UpperCAmelCase__ = os.path.join(_lowercase , 'best-checkpoint' , _lowercase ) if os.path.exists(_lowercase ): logger.info( 'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.' , _lowercase , _lowercase , ) else: logger.info('***** Running self-training: iteration: %d, stage: 2 *****' , _lowercase ) finetune(**_lowercase ) accelerator.wait_for_everyone() assert os.path.exists(_lowercase ) logger.info('Self-training job completed: iteration: %d, stage: 2.' 
, _lowercase ) UpperCAmelCase__ = iteration UpperCAmelCase__ = data_dir_format(iteration + 1 ) UpperCAmelCase__ = AutoConfig.from_pretrained(os.path.join(_lowercase , 'best-checkpoint' ) ) UpperCAmelCase__ = config.idalabel UpperCAmelCase__ = os.path.join(_lowercase , 'eval_results_best-checkpoint.json' ) UpperCAmelCase__ = os.path.join(_lowercase , 'test_results_best-checkpoint.json' ) assert os.path.exists(_lowercase ) with open(_lowercase , 'r' ) as f: UpperCAmelCase__ = float(json.load(_lowercase )[args.eval_metric] ) UpperCAmelCase__ = os.path.join(_lowercase , 'infer_output_best-checkpoint.csv' ) assert os.path.exists(_lowercase ) # Loading the dataset from local csv or json files. UpperCAmelCase__ = load_dataset(args.data_file_extension , data_files={'data': data_files['infer']} )['''data'''] UpperCAmelCase__ = load_dataset('csv' , data_files={'data': infer_output_file} )['''data'''] if accelerator.is_main_process: os.makedirs(_lowercase , exist_ok=_lowercase ) shutil.copy(_lowercase , os.path.join(_lowercase , f'''eval_results_iter-{iteration}.json''' ) ) if os.path.exists(_lowercase ): shutil.copy(_lowercase , os.path.join(_lowercase , f'''test_results_iter-{iteration}.json''' ) ) create_pseudo_labeled_data(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) accelerator.wait_for_everyone() UpperCAmelCase__ = os.path.join(_lowercase , f'''train_pseudo.{args.data_file_extension}''' ) if args.evaluation_strategy != IntervalStrategy.NO.value: UpperCAmelCase__ = eval_result if best_iteration is None: UpperCAmelCase__ = new_iteration UpperCAmelCase__ = new_eval_result else: if new_eval_result - best_eval_result > args.early_stopping_threshold: UpperCAmelCase__ = new_iteration UpperCAmelCase__ = new_eval_result UpperCAmelCase__ = 0 else: if new_eval_result == best_eval_result: UpperCAmelCase__ = new_iteration UpperCAmelCase__ = new_eval_result early_stopping_patience_counter += 1 if early_stopping_patience_counter >= args.early_stopping_patience: UpperCAmelCase__ = True progress_bar.update(1 ) if should_training_stop: break if best_iteration is not None: # Save the best iteration logger.info('Best iteration: %d' , _lowercase ) logger.info('Best evaluation result: %s = %f' , args.eval_metric , _lowercase ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(_lowercase , f'''eval_results_iter-{iteration}.json''' ) , os.path.join(_lowercase , 'eval_results_best-iteration.json' ) , ) else: # Assume that the last iteration is the best logger.info('Best iteration: %d' , args.max_selftrain_iterations - 1 ) logger.info('Best evaluation result: %s = %f' , args.eval_metric , _lowercase ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(_lowercase , f'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) , os.path.join(_lowercase , 'eval_results_best-iteration.json' ) , )
98
'''simple docstring''' import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __lowercase : List[str] = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''') @require_sentencepiece @require_tokenizers class __lowercase ( _lowercase , unittest.TestCase ): lowerCamelCase : Any = PegasusTokenizer lowerCamelCase : Optional[Any] = PegasusTokenizerFast lowerCamelCase : Union[str, Any] = True lowerCamelCase : Union[str, Any] = True def UpperCAmelCase__ (self ): super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase_ : Optional[int] = PegasusTokenizer(A ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def UpperCAmelCase__ (self ): return PegasusTokenizer.from_pretrained('''google/pegasus-large''' ) def UpperCAmelCase__ (self , **A ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **A ) def UpperCAmelCase__ (self , A ): return ("This is a test", "This is a test") def UpperCAmelCase__ (self ): lowerCamelCase_ : str = '''</s>''' lowerCamelCase_ : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A ) def UpperCAmelCase__ (self ): lowerCamelCase_ : List[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<pad>''' ) self.assertEqual(vocab_keys[1] , '''</s>''' ) self.assertEqual(vocab_keys[-1] , '''v''' ) self.assertEqual(len(A ) , 1_1_0_3 ) def UpperCAmelCase__ (self ): self.assertEqual(self.get_tokenizer().vocab_size , 1_1_0_3 ) def UpperCAmelCase__ (self ): lowerCamelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCamelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCamelCase_ : str = ( '''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important''' ''' </s> <pad> <pad> <pad>''' ) lowerCamelCase_ : Any = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0] lowerCamelCase_ : Optional[int] = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0] self.assertListEqual(A , A ) def UpperCAmelCase__ (self ): lowerCamelCase_ : Union[str, Any] = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word lowerCamelCase_ : Union[str, Any] = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.''' lowerCamelCase_ : Any = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1] lowerCamelCase_ : List[Any] = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0] self.assertListEqual(A , A ) def UpperCAmelCase__ (self ): lowerCamelCase_ : int = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_6_1_0_3 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 1_0_3 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1_0_2_4 lowerCamelCase_ : Optional[Any] = '''To ensure a smooth flow of bank resolutions.''' lowerCamelCase_ : Tuple = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1] lowerCamelCase_ : 
str = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0] self.assertListEqual(A , A ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def UpperCAmelCase__ (self ): lowerCamelCase_ : Tuple = ['''This is going to be way too long.''' * 1_5_0, '''short example'''] lowerCamelCase_ : int = ['''not super long but more than 5 tokens''', '''tiny'''] lowerCamelCase_ : List[Any] = self._large_tokenizer(A , padding=A , truncation=A , return_tensors='''pt''' ) lowerCamelCase_ : Dict = self._large_tokenizer( text_target=A , max_length=5 , padding=A , truncation=A , return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 1_0_2_4) assert batch.attention_mask.shape == (2, 1_0_2_4) assert targets["input_ids"].shape == (2, 5) assert len(A ) == 2 # input_ids, attention_mask. @slow def UpperCAmelCase__ (self ): # fmt: off lowerCamelCase_ : int = {'''input_ids''': [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=A , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , ) @require_sentencepiece @require_tokenizers class __lowercase ( _lowercase , unittest.TestCase ): lowerCamelCase : str = PegasusTokenizer lowerCamelCase : Optional[Any] = PegasusTokenizerFast 
lowerCamelCase : Tuple = True lowerCamelCase : str = True def UpperCAmelCase__ (self ): super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase_ : str = PegasusTokenizer(A , offset=0 , mask_token_sent=A , mask_token='''[MASK]''' ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def UpperCAmelCase__ (self ): return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' ) def UpperCAmelCase__ (self , **A ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **A ) def UpperCAmelCase__ (self , A ): return ("This is a test", "This is a test") def UpperCAmelCase__ (self ): lowerCamelCase_ : Dict = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCamelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCamelCase_ : Tuple = ( '''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>''' ''' <pad> <pad> <pad>''' ) lowerCamelCase_ : Union[str, Any] = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0] lowerCamelCase_ : int = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0] self.assertListEqual(A , A ) @require_torch def UpperCAmelCase__ (self ): lowerCamelCase_ : Union[str, Any] = ['''This is going to be way too long.''' * 1_0_0_0, '''short example'''] lowerCamelCase_ : str = ['''not super long but more than 5 tokens''', '''tiny'''] lowerCamelCase_ : Tuple = self._large_tokenizer(A , padding=A , truncation=A , return_tensors='''pt''' ) lowerCamelCase_ : Optional[int] = self._large_tokenizer( text_target=A , max_length=5 , padding=A , truncation=A , return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 4_0_9_6) assert batch.attention_mask.shape == (2, 4_0_9_6) assert targets["input_ids"].shape == (2, 5) assert len(A ) == 2 # input_ids, attention_mask. def UpperCAmelCase__ (self ): lowerCamelCase_ : int = ( '''This is an example string that is used to test the original TF implementation against the HF''' ''' implementation''' ) lowerCamelCase_ : List[str] = self._large_tokenizer(A ).input_ids self.assertListEqual( A , [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1] , )
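# A minimal usage sketch of the tokenizer the tests above exercise; this is an
# illustrative example, not part of the test suite, and assumes network access
# to the google/pegasus-large checkpoint referenced in the tests.
from transformers import PegasusTokenizer

pegasus_tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-large")
ids = pegasus_tokenizer("To ensure a smooth flow of bank resolutions.").input_ids
# Pegasus appends the </s> token (id 1), as asserted in the tests above.
assert ids[-1] == pegasus_tokenizer.eos_token_id == 1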
'''simple docstring''' import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin a : Tuple = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""") @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = GPTSwaTokenizer lowercase = False lowercase = True lowercase = False def _lowercase( self ) -> Any: super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase : Dict = GPTSwaTokenizer(A , eos_token="""<unk>""" , bos_token="""<unk>""" , pad_token="""<unk>""" ) tokenizer.save_pretrained(self.tmpdirname ) def _lowercase( self , A ) -> Optional[Any]: UpperCAmelCase : str = """This is a test""" UpperCAmelCase : Optional[Any] = """This is a test""" return input_text, output_text def _lowercase( self ) -> Optional[int]: UpperCAmelCase : List[str] = """<s>""" UpperCAmelCase : List[Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A ) def _lowercase( self ) -> Tuple: UpperCAmelCase : Any = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<unk>""" ) self.assertEqual(vocab_keys[1] , """<s>""" ) self.assertEqual(vocab_keys[-1] , """j""" ) self.assertEqual(len(A ) , 2000 ) def _lowercase( self ) -> Dict: self.assertEqual(self.get_tokenizer().vocab_size , 2000 ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase : str = GPTSwaTokenizer(A ) UpperCAmelCase : Union[str, Any] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [465, 287, 265, 631, 842] ) UpperCAmelCase : List[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) # fmt: off self.assertListEqual( A , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] , ) # fmt: on UpperCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(A ) self.assertListEqual( A , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , ) UpperCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(A ) # fmt: off self.assertListEqual( A , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] ) # fmt: on def _lowercase( self ) -> List[str]: UpperCAmelCase : Any = GPTSwaTokenizer(A ) UpperCAmelCase : Union[str, Any] = ["""This is a test""", """I was born in 92000, and this is falsé."""] UpperCAmelCase : Any = [ [465, 287, 265, 631, 842], [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260], ] # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids for text, expected_ids in zip(A , A ): self.assertListEqual(tokenizer.encode_fast(A ) , A ) # Test that decode_fast returns the input text for text, token_ids in zip(A , A ): self.assertEqual(tokenizer.decode_fast(A ) , A ) @slow def _lowercase( self ) -> int: UpperCAmelCase : List[Any] = [ """<|python|>def fibonacci(n)\n if 
n < 0:\n print('Incorrect input')""", """Hey there, how are you doing this fine day?""", """This is a text with a trailing spaces followed by a dot .""", """Häj sväjs lillebrör! =)""", """Det är inget fel på Mr. Cool""", ] # fmt: off UpperCAmelCase : Optional[Any] = {"""input_ids""": [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=A , model_name="""AI-Sweden/gpt-sw3-126m""" , sequences=A , )
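# The encode_fast/decode_fast round trip the tests above verify, written as a
# sketch against the real library class name (GPTSw3Tokenizer in transformers);
# it assumes the AI-Sweden/gpt-sw3-126m checkpoint used in the slow test is
# accessible to download.
from transformers import GPTSw3Tokenizer

sw3_tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
text = "This is a test"
assert sw3_tokenizer.decode_fast(sw3_tokenizer.encode_fast(text)) == text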
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union

from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream


if TYPE_CHECKING:
    import sqlite3

    import sqlalchemy


class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        # "sql" and "con" are consumed by the reader path, never forwarded to pandas
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows_batch in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows_batch
        return written
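# The writer above backs the public Dataset.to_sql API. A hedged end-to-end
# round trip with an in-memory SQLite connection (assumes the `datasets`
# library with its pandas dependency is installed):
import sqlite3

from datasets import Dataset

ds = Dataset.from_dict({"id": [1, 2], "text": ["foo", "bar"]})
con = sqlite3.connect(":memory:")
written = ds.to_sql("my_table", con)  # dispatches to the batched writer above
assert con.execute("SELECT COUNT(*) FROM my_table").fetchone()[0] == 2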
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        # one "stem" stage plus one named stage per depth entry
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
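# Quick check of the stage naming and backbone feature alignment implemented
# above (a sketch, assuming transformers is installed):
from transformers import ResNetConfig

resnet_config = ResNetConfig(depths=[3, 4, 6, 3], out_features=["stage2", "stage4"])
assert resnet_config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]
assert resnet_config.out_features == ["stage2", "stage4"]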
import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class __A : '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase=9_9 , __lowerCAmelCase=1_3 , __lowerCAmelCase=7 , __lowerCAmelCase=9 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase=3_2 , __lowerCAmelCase=5 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase=8 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.002 , __lowerCAmelCase=1 , __lowerCAmelCase=0 , __lowerCAmelCase=0 , __lowerCAmelCase=None , __lowerCAmelCase=None , ): '''simple docstring''' lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = encoder_seq_length lowerCamelCase__ = decoder_seq_length # For common tests lowerCamelCase__ = self.decoder_seq_length lowerCamelCase__ = is_training lowerCamelCase__ = use_attention_mask lowerCamelCase__ = use_labels lowerCamelCase__ = vocab_size lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = d_ff lowerCamelCase__ = relative_attention_num_buckets lowerCamelCase__ = dropout_rate lowerCamelCase__ = initializer_factor lowerCamelCase__ = eos_token_id lowerCamelCase__ = pad_token_id lowerCamelCase__ = decoder_start_token_id lowerCamelCase__ = None lowerCamelCase__ = decoder_layers def __lowerCamelCase ( self ): '''simple docstring''' return TaConfig.from_pretrained('''google/umt5-base''' ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , ): '''simple docstring''' if attention_mask is None: lowerCamelCase__ = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: lowerCamelCase__ = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: lowerCamelCase__ = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__lowerCAmelCase ) if decoder_head_mask is None: lowerCamelCase__ = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__lowerCAmelCase ) if cross_attn_head_mask is None: lowerCamelCase__ = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=__lowerCAmelCase ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size ) lowerCamelCase__ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is 
no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input lowerCamelCase__ = input_ids.clamp(self.pad_token_id + 1 ) lowerCamelCase__ = decoder_input_ids.clamp(self.pad_token_id + 1 ) lowerCamelCase__ = self.get_config() lowerCamelCase__ = config.num_attention_heads lowerCamelCase__ = self.prepare_inputs_dict(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return config, input_dict def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ , lowerCamelCase__ = self.prepare_config_and_inputs() return config, inputs_dict def __lowerCamelCase ( self ): '''simple docstring''' return TaConfig( vocab_size=1_6_6 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def __lowerCamelCase ( self ): '''simple docstring''' return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ): '''simple docstring''' lowerCamelCase__ = UMTaModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() lowerCamelCase__ = model( input_ids=__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase , attention_mask=__lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , ) lowerCamelCase__ = model(input_ids=__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase ) lowerCamelCase__ = result.last_hidden_state lowerCamelCase__ = result.past_key_values lowerCamelCase__ = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(__lowerCAmelCase ) , config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ): '''simple docstring''' lowerCamelCase__ = UMTaModel(config=__lowerCAmelCase ).get_decoder().to(__lowerCAmelCase ).eval() # first forward pass lowerCamelCase__ = model(__lowerCAmelCase , use_cache=__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase , 
use_cache=__lowerCAmelCase ) self.parent.assertTrue(len(__lowerCAmelCase ) == len(__lowerCAmelCase ) ) self.parent.assertTrue(len(__lowerCAmelCase ) == len(__lowerCAmelCase ) + 1 ) lowerCamelCase__ , lowerCamelCase__ = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids lowerCamelCase__ = ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids and lowerCamelCase__ = torch.cat([input_ids, next_tokens] , dim=-1 ) lowerCamelCase__ = model(__lowerCAmelCase )['''last_hidden_state'''] lowerCamelCase__ = model(__lowerCAmelCase , past_key_values=__lowerCAmelCase )['''last_hidden_state'''] # select random slice lowerCamelCase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowerCamelCase__ = output_from_no_past[:, -1, random_slice_idx].detach() lowerCamelCase__ = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , ): '''simple docstring''' lowerCamelCase__ = UMTaModel(config=__lowerCAmelCase ).to(__lowerCAmelCase ).half().eval() lowerCamelCase__ = model(**__lowerCAmelCase )['''last_hidden_state'''] self.parent.assertFalse(torch.isnan(__lowerCAmelCase ).any().item() ) @require_torch class __A ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) lowerCAmelCase_ = (UMTaForConditionalGeneration,) if is_torch_available() else () lowerCAmelCase_ = ( { """conversational""": UMTaForConditionalGeneration, """feature-extraction""": UMTaModel, """summarization""": UMTaForConditionalGeneration, """text2text-generation""": UMTaForConditionalGeneration, """translation""": UMTaForConditionalGeneration, """question-answering""": UMTaForQuestionAnswering, } if is_torch_available() else {} ) lowerCAmelCase_ = True lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = True lowerCAmelCase_ = True # The small UMT5 model needs higher percentages for CPU/MP tests lowerCAmelCase_ = [0.8, 0.9] def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = UMTaModelTester(self ) @unittest.skip('''Test has a segmentation fault on torch 1.8.0''' ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() lowerCamelCase__ = UMTaModel(config_and_inputs[0] ).to(__lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( __lowerCAmelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'{tmpdirname}/t5_test.onnx' , export_params=__lowerCAmelCase , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , ) @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*__lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions'''] lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() lowerCamelCase__ = config_and_inputs[0] lowerCamelCase__ = UMTaForConditionalGeneration(__lowerCAmelCase ).eval() model.to(__lowerCAmelCase ) lowerCamelCase__ = { '''head_mask''': 
torch.zeros(config.num_layers , config.num_heads , device=__lowerCAmelCase ), '''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__lowerCAmelCase ), '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__lowerCAmelCase ), } for attn_name, (name, mask) in zip(__lowerCAmelCase , head_masking.items() ): lowerCamelCase__ = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": lowerCamelCase__ = torch.ones( config.num_decoder_layers , config.num_heads , device=__lowerCAmelCase ) lowerCamelCase__ = model.generate( config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=__lowerCAmelCase , return_dict_in_generate=__lowerCAmelCase , **__lowerCAmelCase , ) # We check the state of decoder_attentions and cross_attentions just from the last step lowerCamelCase__ = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' ) def __lowerCamelCase ( self ): '''simple docstring''' pass @require_torch @require_sentencepiece @require_tokenizers class __A ( unittest.TestCase ): '''simple docstring''' @slow @unittest.skip( '''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=__lowerCAmelCase ).to(__lowerCAmelCase ) lowerCamelCase__ = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=__lowerCAmelCase , legacy=__lowerCAmelCase ) lowerCamelCase__ = [ '''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''', '''No se como puedo <extra_id_0>.''', '''This is the reason why we <extra_id_0> them.''', '''The <extra_id_0> walks in <extra_id_1>, seats''', '''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''', ] lowerCamelCase__ = tokenizer(__lowerCAmelCase , return_tensors='''pt''' , padding=__lowerCAmelCase ).input_ids # fmt: off lowerCamelCase__ = torch.tensor( [ [ 3_8_5_3_0, 2_1_0_7_0_3, 2_5_6_2_9_9, 1_4_1_0, 2_5_6_2_9_8, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 8_2_6, 3_2_1, 6_7_1, 2_5_9_2_2, 2_5_6_2_9_9, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 1_4_6_0, 3_3_9, 3_1_2, 1_9_0_1_4, 1_0_6_2_0, 7_5_8, 2_5_6_2_9_9, 2_3_5_5,2_7_4, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 5_1_7, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 3_0_1, 2_5_6_2_9_8, 2_7_5, 1_1_9_9_8_3,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 3_2_0, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 2_2_3_4, 2_8_9, 2_2_7_5, 3_3_3,6_1_3_9_1, 2_8_9, 2_5_6_2_9_8, 5_4_3, 2_5_6_2_9_7, 1_6_8_7_1_4, 3_2_9, 2_5_6_2_9_6,2_7_4, 1], ] ) # fmt: on torch.testing.assert_allclose(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ = model.generate(input_ids.to(__lowerCAmelCase ) ) lowerCamelCase__ = [ '''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. 
[eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''', '''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', ] lowerCamelCase__ = tokenizer.batch_decode(__lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
"""Project Euler problem 30: find the sum of all the numbers that can be
written as the sum of fifth powers of their digits."""

DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1_000_000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
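# Worked check (assumes the definitions above are in scope): 4150 and 4151 are
# both sums of the fifth powers of their own digits, e.g.
# 4**5 + 1**5 + 5**5 + 0**5 == 1024 + 1 + 3125 + 0 == 4150, so both are counted.
assert digits_fifth_powers_sum(4150) == 4150
assert digits_fifth_powers_sum(4151) == 4151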
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
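# The attribute_map above lets HF-standard attribute names resolve to GPT-1's
# native ones; a quick check (assumes transformers is installed):
from transformers import OpenAIGPTConfig

gpt_config = OpenAIGPTConfig()
assert gpt_config.hidden_size == gpt_config.n_embd == 768
assert gpt_config.num_hidden_layers == gpt_config.n_layer == 12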
import argparse

import torch
from torch import nn

from transformers import M2M100Config, M2M100ForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"] or m2m_100["cfg"]["model"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = M2M100Config(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
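# The embedding-to-linear weight-tying trick used above, in isolation: the
# resulting lm_head shares storage with the embedding matrix. A quick check
# (assumes make_linear_from_emb from above is in scope and torch is installed):
from torch import nn

emb = nn.Embedding(10, 4)
lin = make_linear_from_emb(emb)
assert lin.weight.data_ptr() == emb.weight.data_ptr()  # same underlying storage
assert lin.bias is None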
import gc import unittest import numpy as np import torch from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowercase_ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): A__ : Any = DanceDiffusionPipeline A__ : Any = UNCONDITIONAL_AUDIO_GENERATION_PARAMS A__ : List[Any] = PipelineTesterMixin.required_optional_params - { """callback""", """latents""", """callback_steps""", """output_type""", """num_images_per_prompt""", } A__ : Dict = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS A__ : str = False A__ : Any = False def lowerCamelCase_ ( self ): """simple docstring""" torch.manual_seed(0 ) UpperCamelCase_ = UNetaDModel( block_out_channels=(3_2, 3_2, 6_4) , extra_in_channels=1_6 , sample_size=5_1_2 , sample_rate=1_6_0_0_0 , in_channels=2 , out_channels=2 , flip_sin_to_cos=__UpperCamelCase , use_timestep_embedding=__UpperCamelCase , time_embedding_type="""fourier""" , mid_block_type="""UNetMidBlock1D""" , down_block_types=("""DownBlock1DNoSkip""", """DownBlock1D""", """AttnDownBlock1D""") , up_block_types=("""AttnUpBlock1D""", """UpBlock1D""", """UpBlock1DNoSkip""") , ) UpperCamelCase_ = IPNDMScheduler() UpperCamelCase_ = { """unet""": unet, """scheduler""": scheduler, } return components def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase=0 ): """simple docstring""" if str(__UpperCamelCase ).startswith("""mps""" ): UpperCamelCase_ = torch.manual_seed(__UpperCamelCase ) else: UpperCamelCase_ = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase ) UpperCamelCase_ = { """batch_size""": 1, """generator""": generator, """num_inference_steps""": 4, } return inputs def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator UpperCamelCase_ = self.get_dummy_components() UpperCamelCase_ = DanceDiffusionPipeline(**__UpperCamelCase ) UpperCamelCase_ = pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) UpperCamelCase_ = self.get_dummy_inputs(__UpperCamelCase ) UpperCamelCase_ = pipe(**__UpperCamelCase ) UpperCamelCase_ = output.audios UpperCamelCase_ = audio[0, -3:, -3:] assert audio.shape == (1, 2, components["unet"].sample_size) UpperCamelCase_ = np.array([-0.7_265, 1.0_000, -0.8_388, 0.1_175, 0.9_498, -1.0_000] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def lowerCamelCase_ ( self ): """simple docstring""" return super().test_save_load_local() @skip_mps def lowerCamelCase_ ( self ): """simple docstring""" return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 ) @skip_mps def lowerCamelCase_ ( self ): """simple docstring""" return super().test_save_load_optional_components() @skip_mps def lowerCamelCase_ ( self ): """simple docstring""" return super().test_attention_slicing_forward_pass() def lowerCamelCase_ ( self ): """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class lowercase_ ( unittest.TestCase ): def lowerCamelCase_ ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase_ ( self ): """simple docstring""" 
UpperCamelCase_ = torch_device UpperCamelCase_ = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" ) UpperCamelCase_ = pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) UpperCamelCase_ = torch.manual_seed(0 ) UpperCamelCase_ = pipe(generator=__UpperCamelCase , num_inference_steps=1_0_0 , audio_length_in_s=4.096 ) UpperCamelCase_ = output.audios UpperCamelCase_ = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) UpperCamelCase_ = np.array([-0.0_192, -0.0_231, -0.0_318, -0.0_059, 0.0_002, -0.0_020] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2 def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = torch_device UpperCamelCase_ = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" , torch_dtype=torch.floataa ) UpperCamelCase_ = pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) UpperCamelCase_ = torch.manual_seed(0 ) UpperCamelCase_ = pipe(generator=__UpperCamelCase , num_inference_steps=1_0_0 , audio_length_in_s=4.096 ) UpperCamelCase_ = output.audios UpperCamelCase_ = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) UpperCamelCase_ = np.array([-0.0_367, -0.0_488, -0.0_771, -0.0_525, -0.0_444, -0.0_341] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
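# Minimal inference sketch mirroring the slow test above; needs diffusers,
# torch, and network access to the harmonai/maestro-150k checkpoint.
import torch
from diffusers import DanceDiffusionPipeline

pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
output = pipe(generator=torch.manual_seed(0), num_inference_steps=100, audio_length_in_s=4.096)
print(output.audios.shape)  # (batch, channels, samples)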
from PIL import Image


def change_contrast(img: Image, level: int) -> Image:
    """Return a copy of ``img`` with its contrast adjusted by ``level``."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
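# Sanity check of the contrast formula, no image file needed: at level=0 the
# factor collapses to 259*255 / (255*259) == 1.0, so the point mapping
# 128 + 1.0*(c - 128) is the identity.
assert (259 * (0 + 255)) / (255 * (259 - 0)) == 1.0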
def solution(n: int = 1000) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
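# Worked values for the smallest inputs (each term a contributes
# 2 * a * ((a - 1) // 2)); assumes solution() above is in scope:
assert solution(3) == 6   # only a=3 contributes: 2*3*1
assert solution(4) == 14  # a=4 adds 2*4*1 == 8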
"""simple docstring""" import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin snake_case_ = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right snake_case_ = 25_0004 snake_case_ = 25_0020 @require_sentencepiece @require_tokenizers class A_ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = MBartaaTokenizer __UpperCamelCase = MBartaaTokenizerFast __UpperCamelCase = True __UpperCamelCase = True def UpperCAmelCase__ ( self :Dict ) -> Dict: super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase = MBartaaTokenizer(lowercase_ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=lowercase_ ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCAmelCase__ ( self :Optional[int] ) -> Optional[Any]: UpperCAmelCase = '<s>' UpperCAmelCase = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ ) def UpperCAmelCase__ ( self :Any ) -> Optional[Any]: UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(lowercase_ ) , 10_54 ) def UpperCAmelCase__ ( self :Optional[Any] ) -> Optional[int]: self.assertEqual(self.get_tokenizer().vocab_size , 10_54 ) def UpperCAmelCase__ ( self :Any ) -> Dict: UpperCAmelCase = MBartaaTokenizer(lowercase_ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=lowercase_ ) UpperCAmelCase = tokenizer.tokenize('This is a test' ) self.assertListEqual(lowercase_ , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowercase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) UpperCAmelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( lowercase_ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , ) UpperCAmelCase = tokenizer.convert_tokens_to_ids(lowercase_ ) self.assertListEqual( lowercase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) UpperCAmelCase = tokenizer.convert_ids_to_tokens(lowercase_ ) self.assertListEqual( lowercase_ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , ) @slow def UpperCAmelCase__ ( self :Optional[Any] ) -> Any: # fmt: off UpperCAmelCase = {'input_ids': [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase_ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , ) def UpperCAmelCase__ ( self :List[str] ) -> Dict: if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return UpperCAmelCase = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ ) UpperCAmelCase = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ ) UpperCAmelCase = tempfile.mkdtemp() UpperCAmelCase = tokenizer_r.save_pretrained(lowercase_ ) UpperCAmelCase = tokenizer_p.save_pretrained(lowercase_ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) UpperCAmelCase = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f ) self.assertSequenceEqual(lowercase_ , lowercase_ ) # Checks everything loads correctly in the same way UpperCAmelCase = tokenizer_r.from_pretrained(lowercase_ ) UpperCAmelCase = tokenizer_p.from_pretrained(lowercase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowercase_ , lowercase_ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(lowercase_ ) # Save tokenizer rust, legacy_format=True UpperCAmelCase = tempfile.mkdtemp() UpperCAmelCase = tokenizer_r.save_pretrained(lowercase_ , legacy_format=lowercase_ ) UpperCAmelCase = tokenizer_p.save_pretrained(lowercase_ ) # Checks it save with the same files self.assertSequenceEqual(lowercase_ , lowercase_ ) # Checks everything loads correctly in the same way UpperCAmelCase = tokenizer_r.from_pretrained(lowercase_ ) UpperCAmelCase = tokenizer_p.from_pretrained(lowercase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowercase_ , lowercase_ ) ) shutil.rmtree(lowercase_ ) # Save tokenizer rust, legacy_format=False UpperCAmelCase = tempfile.mkdtemp() UpperCAmelCase = tokenizer_r.save_pretrained(lowercase_ , legacy_format=lowercase_ ) UpperCAmelCase = tokenizer_p.save_pretrained(lowercase_ ) # Checks it saved the tokenizer.json file self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way UpperCAmelCase = tokenizer_r.from_pretrained(lowercase_ ) UpperCAmelCase = tokenizer_p.from_pretrained(lowercase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowercase_ , lowercase_ ) ) shutil.rmtree(lowercase_ ) @require_torch @require_sentencepiece @require_tokenizers class A_ ( unittest.TestCase ): """simple docstring""" __UpperCamelCase = """facebook/mbart-large-50-one-to-many-mmt""" __UpperCamelCase = [ """ UN Chief Says There Is No Military Solution in Syria""", """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year 
conflict and more weapons will only worsen the violence and misery for millions of people.""", ] __UpperCamelCase = [ """Şeful ONU declară că nu există o soluţie militară în Siria""", """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei""" """ pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor""" """ face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""", ] __UpperCamelCase = [EN_CODE, 82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2] @classmethod def UpperCAmelCase__ ( cls :int ) -> Optional[int]: UpperCAmelCase = MBartaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' ) UpperCAmelCase = 1 return cls def UpperCAmelCase__ ( self :Optional[Any] ) -> Optional[Any]: self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 25_00_01 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 25_00_04 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 25_00_20 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 25_00_38 ) def UpperCAmelCase__ ( self :Dict ) -> Tuple: UpperCAmelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , lowercase_ ) def UpperCAmelCase__ ( self :int ) -> List[Any]: self.assertIn(lowercase_ , self.tokenizer.all_special_ids ) UpperCAmelCase = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2] UpperCAmelCase = self.tokenizer.decode(lowercase_ , skip_special_tokens=lowercase_ ) UpperCAmelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowercase_ ) self.assertEqual(lowercase_ , lowercase_ ) self.assertNotIn(self.tokenizer.eos_token , lowercase_ ) def UpperCAmelCase__ ( self :Tuple ) -> str: UpperCAmelCase = ['this is gunna be a long sentence ' * 20] assert isinstance(src_text[0] , lowercase_ ) UpperCAmelCase = 10 UpperCAmelCase = self.tokenizer(lowercase_ , max_length=lowercase_ , truncation=lowercase_ ).input_ids[0] self.assertEqual(ids[0] , lowercase_ ) self.assertEqual(ids[-1] , 2 ) self.assertEqual(len(lowercase_ ) , lowercase_ ) def UpperCAmelCase__ ( self :List[Any] ) -> List[Any]: self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_00_53, 25_00_01] ) def UpperCAmelCase__ ( self :List[str] ) -> Optional[Any]: UpperCAmelCase = tempfile.mkdtemp() UpperCAmelCase = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(lowercase_ ) UpperCAmelCase = MBartaaTokenizer.from_pretrained(lowercase_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowercase_ ) @require_torch def UpperCAmelCase__ ( self :Tuple ) -> Any: UpperCAmelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowercase_ , return_tensors='pt' ) UpperCAmelCase = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE] @require_torch def UpperCAmelCase__ ( self :Tuple ) -> Dict: UpperCAmelCase = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=lowercase_ , truncation=lowercase_ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , ) UpperCAmelCase = 
shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id ) self.assertIsInstance(lowercase_ , lowercase_ ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) UpperCAmelCase = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , lowercase_ ) self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def UpperCAmelCase__ ( self :Tuple ) -> Any: UpperCAmelCase = self.tokenizer(self.src_text , padding=lowercase_ , truncation=lowercase_ , max_length=3 , return_tensors='pt' ) UpperCAmelCase = self.tokenizer( text_target=self.tgt_text , padding=lowercase_ , truncation=lowercase_ , max_length=10 , return_tensors='pt' ) UpperCAmelCase = targets['input_ids'] UpperCAmelCase = shift_tokens_right(lowercase_ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def UpperCAmelCase__ ( self :Tuple ) -> Any: UpperCAmelCase = self.tokenizer._build_translation_inputs( 'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' ) self.assertEqual( nested_simplify(lowercase_ ) , { # en_XX, A, test, EOS 'input_ids': [[25_00_04, 62, 30_34, 2]], 'attention_mask': [[1, 1, 1, 1]], # ar_AR 'forced_bos_token_id': 25_00_01, } , )
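# The src_lang/tgt_lang mechanics the tests above exercise, as a short
# translation sketch written against the real library names (MBart50Tokenizer);
# checkpoint download required.
from transformers import MBart50Tokenizer, MBartForConditionalGeneration

mbart_tokenizer = MBart50Tokenizer.from_pretrained(
    "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
)
mbart_model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-one-to-many-mmt")
encoded = mbart_tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
generated = mbart_model.generate(**encoded, forced_bos_token_id=mbart_tokenizer.lang_code_to_id["ro_RO"])
print(mbart_tokenizer.batch_decode(generated, skip_special_tokens=True))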
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { 'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json', } class lowerCAmelCase__ ( UpperCAmelCase__ ): lowerCAmelCase : Any = "deta" lowerCAmelCase : Union[str, Any] = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self : Optional[Any] , lowerCamelCase__ : str=None , lowerCamelCase__ : str=9_00 , lowerCamelCase__ : Any=20_48 , lowerCamelCase__ : Optional[int]=6 , lowerCamelCase__ : str=20_48 , lowerCamelCase__ : Dict=8 , lowerCamelCase__ : Any=6 , lowerCamelCase__ : Union[str, Any]=10_24 , lowerCamelCase__ : Optional[int]=8 , lowerCamelCase__ : List[Any]=0.0 , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : int="relu" , lowerCamelCase__ : str=2_56 , lowerCamelCase__ : Dict=0.1 , lowerCamelCase__ : Dict=0.0 , lowerCamelCase__ : Dict=0.0 , lowerCamelCase__ : Optional[int]=0.0_2 , lowerCamelCase__ : List[Any]=1.0 , lowerCamelCase__ : Optional[int]=True , lowerCamelCase__ : Any=False , lowerCamelCase__ : Any="sine" , lowerCamelCase__ : str=5 , lowerCamelCase__ : Optional[int]=4 , lowerCamelCase__ : int=4 , lowerCamelCase__ : Optional[Any]=True , lowerCamelCase__ : Dict=3_00 , lowerCamelCase__ : int=True , lowerCamelCase__ : Dict=True , lowerCamelCase__ : Union[str, Any]=1 , lowerCamelCase__ : Any=5 , lowerCamelCase__ : List[Any]=2 , lowerCamelCase__ : Union[str, Any]=1 , lowerCamelCase__ : str=1 , lowerCamelCase__ : Union[str, Any]=5 , lowerCamelCase__ : str=2 , lowerCamelCase__ : Optional[Any]=0.1 , lowerCamelCase__ : Union[str, Any]=0.2_5 , **lowerCamelCase__ : Optional[Any] , ) ->List[str]: '''simple docstring''' if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." 
) _UpperCAmelCase : int = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] ) else: if isinstance(lowerCamelCase__ , lowerCamelCase__ ): _UpperCAmelCase : Any = backbone_config.pop("model_type" ) _UpperCAmelCase : Optional[int] = CONFIG_MAPPING[backbone_model_type] _UpperCAmelCase : List[str] = config_class.from_dict(lowerCamelCase__ ) _UpperCAmelCase : Optional[Any] = backbone_config _UpperCAmelCase : Optional[int] = num_queries _UpperCAmelCase : Union[str, Any] = max_position_embeddings _UpperCAmelCase : Union[str, Any] = d_model _UpperCAmelCase : str = encoder_ffn_dim _UpperCAmelCase : Optional[int] = encoder_layers _UpperCAmelCase : int = encoder_attention_heads _UpperCAmelCase : Union[str, Any] = decoder_ffn_dim _UpperCAmelCase : Tuple = decoder_layers _UpperCAmelCase : Union[str, Any] = decoder_attention_heads _UpperCAmelCase : Any = dropout _UpperCAmelCase : List[str] = attention_dropout _UpperCAmelCase : Union[str, Any] = activation_dropout _UpperCAmelCase : Optional[int] = activation_function _UpperCAmelCase : str = init_std _UpperCAmelCase : Tuple = init_xavier_std _UpperCAmelCase : Optional[Any] = encoder_layerdrop _UpperCAmelCase : int = auxiliary_loss _UpperCAmelCase : Union[str, Any] = position_embedding_type # deformable attributes _UpperCAmelCase : List[Any] = num_feature_levels _UpperCAmelCase : List[Any] = encoder_n_points _UpperCAmelCase : Tuple = decoder_n_points _UpperCAmelCase : Optional[int] = two_stage _UpperCAmelCase : Dict = two_stage_num_proposals _UpperCAmelCase : int = with_box_refine _UpperCAmelCase : str = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError("If two_stage is True, with_box_refine must be True." ) # Hungarian matcher _UpperCAmelCase : Optional[int] = class_cost _UpperCAmelCase : Dict = bbox_cost _UpperCAmelCase : int = giou_cost # Loss coefficients _UpperCAmelCase : int = mask_loss_coefficient _UpperCAmelCase : List[Any] = dice_loss_coefficient _UpperCAmelCase : Dict = bbox_loss_coefficient _UpperCAmelCase : int = giou_loss_coefficient _UpperCAmelCase : Optional[Any] = eos_coefficient _UpperCAmelCase : int = focal_alpha super().__init__(is_encoder_decoder=lowerCamelCase__ , **lowerCamelCase__ ) @property def lowerCAmelCase__ ( self : int ) ->int: '''simple docstring''' return self.encoder_attention_heads @property def lowerCAmelCase__ ( self : List[Any] ) ->int: '''simple docstring''' return self.d_model def lowerCAmelCase__ ( self : Union[str, Any] ) ->int: '''simple docstring''' _UpperCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ ) _UpperCAmelCase : Tuple = self.backbone_config.to_dict() _UpperCAmelCase : List[Any] = self.__class__.model_type return output
234
0
from collections import namedtuple

import requests
from lxml import html  # type: ignore

_UpperCAmelCase = namedtuple('covid_data', 'cases deaths recovered')


def lowerCAmelCase_ ( UpperCamelCase_ = "https://www.worldometers.info/coronavirus/" ) -> covid_data:
    UpperCamelCase_ = "//div[@class = \"maincounter-number\"]/span/text()"
    return covid_data(*html.fromstring(requests.get(UpperCamelCase_ ).content ).xpath(UpperCamelCase_ ) )


_UpperCAmelCase = 'Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'
print(fmt.format(*covid_stats()))
328
from sklearn.metrics import fa_score, matthews_corrcoef import datasets from .record_evaluation import evaluate as evaluate_record _UpperCAmelCase = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n' _UpperCAmelCase = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n' _UpperCAmelCase = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, 
references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n' def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]: return float((preds == labels).mean() ) def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_="binary" ) -> Tuple: UpperCamelCase_ = simple_accuracy(UpperCamelCase_ , UpperCamelCase_ ) UpperCamelCase_ = float(fa_score(y_true=UpperCamelCase_ , y_pred=UpperCamelCase_ , average=UpperCamelCase_ ) ) return { "accuracy": acc, "f1": fa, } def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]: UpperCamelCase_ = {} for id_pred, label in zip(UpperCamelCase_ , UpperCamelCase_ ): UpperCamelCase_ = F'''{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}''' UpperCamelCase_ = id_pred["prediction"] if question_id in question_map: question_map[question_id].append((pred, label) ) else: UpperCamelCase_ = [(pred, label)] UpperCamelCase_ , UpperCamelCase_ = [], [] for question, preds_labels in question_map.items(): UpperCamelCase_ , UpperCamelCase_ = zip(*UpperCamelCase_ ) UpperCamelCase_ = fa_score(y_true=UpperCamelCase_ , y_pred=UpperCamelCase_ , average="macro" ) fas.append(UpperCamelCase_ ) UpperCamelCase_ = int(sum(pred == label for pred, label in preds_labels ) == len(UpperCamelCase_ ) ) ems.append(UpperCamelCase_ ) UpperCamelCase_ = float(sum(UpperCamelCase_ ) / len(UpperCamelCase_ ) ) UpperCamelCase_ = sum(UpperCamelCase_ ) / len(UpperCamelCase_ ) UpperCamelCase_ = float(fa_score(y_true=UpperCamelCase_ , y_pred=[id_pred["prediction"] for id_pred in ids_preds] ) ) return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a} @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _UpperCamelCase ( datasets.Metric ): def lowercase ( self: Optional[int] ) -> Optional[int]: """simple docstring""" if self.config_name not in [ "boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg", ]: raise KeyError( "You should supply a configuration name selected in " "[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None , ) def lowercase ( self: List[Any] ) -> int: """simple docstring""" if self.config_name == "record": return { "predictions": { "idx": { "passage": datasets.Value("int64" ), "query": datasets.Value("int64" ), }, "prediction_text": datasets.Value("string" ), }, "references": { "idx": { "passage": datasets.Value("int64" ), "query": datasets.Value("int64" ), }, "answers": datasets.Sequence(datasets.Value("string" ) ), }, } elif self.config_name == "multirc": return { "predictions": { "idx": { "answer": datasets.Value("int64" ), "paragraph": datasets.Value("int64" ), "question": datasets.Value("int64" ), }, "prediction": datasets.Value("int64" ), }, "references": datasets.Value("int64" ), } else: return { "predictions": datasets.Value("int64" ), "references": datasets.Value("int64" ), } def 
lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: List[str] ) -> Dict: """simple docstring""" if self.config_name == "axb": return {"matthews_correlation": matthews_corrcoef(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )} elif self.config_name == "cb": return acc_and_fa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , fa_avg="macro" ) elif self.config_name == "record": UpperCamelCase_ = [ { "qas": [ {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]} for ref in references ] } ] UpperCamelCase_ = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions} return evaluate_record(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )[0] elif self.config_name == "multirc": return evaluate_multirc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]: return {"accuracy": simple_accuracy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )} else: raise KeyError( "You should supply a configuration name selected in " "[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
328
1
"""simple docstring""" import argparse import os import re import packaging.version lowerCamelCase_ : Tuple = """examples/""" lowerCamelCase_ : str = { """examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""), """init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""), """setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""), """doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""), } lowerCamelCase_ : Dict = { """init""": """src/transformers/__init__.py""", """setup""": """setup.py""", } lowerCamelCase_ : Any = """README.md""" def _A ( lowercase , lowercase , lowercase ): """simple docstring""" with open(lowercase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: a =f.read() a , a =REPLACE_PATTERNS[pattern] a =replace.replace('''VERSION''' , lowercase ) a =re_pattern.sub(lowercase , lowercase ) with open(lowercase , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.write(lowercase ) def _A ( lowercase ): """simple docstring""" for folder, directories, fnames in os.walk(lowercase ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('''research_projects''' ) if "legacy" in directories: directories.remove('''legacy''' ) for fname in fnames: if fname.endswith('''.py''' ): update_version_in_file(os.path.join(lowercase , lowercase ) , lowercase , pattern='''examples''' ) def _A ( lowercase , lowercase=False ): """simple docstring""" for pattern, fname in REPLACE_FILES.items(): update_version_in_file(lowercase , lowercase , lowercase ) if not patch: update_version_in_examples(lowercase ) def _A ( ): """simple docstring""" a ='''🤗 Transformers currently provides the following architectures''' a ='''1. Want to contribute a new model?''' with open(lowercase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: a =f.readlines() # Find the start of the list. a =0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 a =start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith('''1.''' ): a =lines[index].replace( '''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , ) index += 1 with open(lowercase , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(lowercase ) def _A ( ): """simple docstring""" with open(REPLACE_FILES['''init'''] , '''r''' ) as f: a =f.read() a =REPLACE_PATTERNS['''init'''][0].search(lowercase ).groups()[0] return packaging.version.parse(lowercase ) def _A ( lowercase=False ): """simple docstring""" a =get_version() if patch and default_version.is_devrelease: raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' ) if default_version.is_devrelease: a =default_version.base_version elif patch: a =f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}''' else: a =f'''{default_version.major}.{default_version.minor + 1}.0''' # Now let's ask nicely if that's the right one. a =input(f'''Which version are you releasing? 
[{default_version}]''' ) if len(lowercase ) == 0: a =default_version print(f'''Updating version to {version}.''' ) global_version_update(lowercase , patch=lowercase ) if not patch: print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() def _A ( ): """simple docstring""" a =get_version() a =f'''{current_version.major}.{current_version.minor + 1}.0.dev0''' a =current_version.base_version # Check with the user we got that right. a =input(f'''Which version are we developing now? [{dev_version}]''' ) if len(lowercase ) == 0: a =dev_version print(f'''Updating version to {version}.''' ) global_version_update(lowercase ) print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() if __name__ == "__main__": lowerCamelCase_ : Optional[Any] = argparse.ArgumentParser() parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""") parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""") lowerCamelCase_ : int = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("""Nothing to do after a patch :-)""") else: post_release_work()
81
"""simple docstring""" from transformers import DistilBertTokenizer, DistilBertTokenizerFast from transformers.testing_utils import require_tokenizers, slow from ..bert.test_tokenization_bert import BertTokenizationTest @require_tokenizers class __A ( _SCREAMING_SNAKE_CASE ): """simple docstring""" __lowerCAmelCase = DistilBertTokenizer __lowerCAmelCase = DistilBertTokenizerFast __lowerCAmelCase = True @slow def SCREAMING_SNAKE_CASE ( self ) -> int: a =DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' ) a =tokenizer.encode('''sequence builders''' , add_special_tokens=__A ) a =tokenizer.encode('''multi-sequence build''' , add_special_tokens=__A ) a =tokenizer.build_inputs_with_special_tokens(__A ) a =tokenizer.build_inputs_with_special_tokens(__A , __A ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ]
81
1
"""simple docstring""" import qiskit def __A ( a_ :int , a_ :int) -> qiskit.result.counts.Counts: __a : Optional[Any] = qiskit.Aer.get_backend('''aer_simulator''') # Create a Quantum Circuit acting on the q register __a : Optional[Any] = qiskit.QuantumCircuit(a_ , a_) # Apply X (NOT) Gate to Qubits 0 & 1 circuit.x(0) circuit.x(1) # Map the quantum measurement to the classical bits circuit.measure([0, 1] , [0, 1]) # Execute the circuit on the qasm simulator __a : Any = qiskit.execute(a_ , a_ , shots=10_00) # Return the histogram data of the results of the experiment. return job.result().get_counts(a_) if __name__ == "__main__": A = single_qubit_measure(2, 2) print(F'Total count for various states are: {counts}')
188
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A = { '''configuration_instructblip''': [ '''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''InstructBlipConfig''', '''InstructBlipQFormerConfig''', '''InstructBlipVisionConfig''', ], '''processing_instructblip''': ['''InstructBlipProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A = [ '''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''InstructBlipQFormerModel''', '''InstructBlipPreTrainedModel''', '''InstructBlipForConditionalGeneration''', '''InstructBlipVisionModel''', ] if TYPE_CHECKING: from .configuration_instructblip import ( INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig, ) from .processing_instructblip import InstructBlipProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_instructblip import ( INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST, InstructBlipForConditionalGeneration, InstructBlipPreTrainedModel, InstructBlipQFormerModel, InstructBlipVisionModel, ) else: import sys A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
188
1
from __future__ import annotations

snake_case_ : Tuple = 10


def A (__A : list[int] ) -> list[int]:
    """simple docstring"""
    UpperCAmelCase_ = 1
    UpperCAmelCase_ = max(__A )
    while placement <= max_digit:
        # declare and initialize empty buckets
        UpperCAmelCase_ = [[] for _ in range(__A )]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            UpperCAmelCase_ = int((i / placement) % RADIX )
            buckets[tmp].append(__A )
        # put each buckets' contents into list_of_ints
        UpperCAmelCase_ = 0
        for b in range(__A ):
            for i in buckets[b]:
                UpperCAmelCase_ = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints


if __name__ == "__main__":
    import doctest

    doctest.testmod()
51
import warnings
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class __snake_case ( a ):
    UpperCAmelCase__ : Dict = ['''image_processor''', '''tokenizer''']
    UpperCAmelCase__ : Dict = '''FlavaImageProcessor'''
    UpperCAmelCase__ : Dict = ('''BertTokenizer''', '''BertTokenizerFast''')

    def __init__( self : Union[str, Any] , _snake_case : List[str]=None , _snake_case : str=None , **_snake_case : int):
        """simple docstring"""
        UpperCAmelCase_ = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , _snake_case , )
            UpperCAmelCase_ = kwargs.pop('''feature_extractor''')
        UpperCAmelCase_ = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''')
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''')
        super().__init__(_snake_case , _snake_case)
        UpperCAmelCase_ = self.image_processor

    def __call__( self : List[Any] , _snake_case : Optional[ImageInput] = None , _snake_case : Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , _snake_case : bool = True , _snake_case : Union[bool, str, PaddingStrategy] = False , _snake_case : Union[bool, str, TruncationStrategy] = False , _snake_case : Optional[int] = None , _snake_case : int = 0 , _snake_case : Optional[int] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[bool] = None , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = True , _snake_case : Optional[Union[str, TensorType]] = None , **_snake_case : Any , ):
        """simple docstring"""
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''')
        if text is not None:
            UpperCAmelCase_ = self.tokenizer(
                text=_snake_case , add_special_tokens=_snake_case , padding=_snake_case , truncation=_snake_case , max_length=_snake_case , stride=_snake_case , pad_to_multiple_of=_snake_case , return_token_type_ids=_snake_case , return_attention_mask=_snake_case , return_overflowing_tokens=_snake_case , return_special_tokens_mask=_snake_case , return_offsets_mapping=_snake_case , return_length=_snake_case , verbose=_snake_case , return_tensors=_snake_case , **_snake_case , )
        if images is not None:
            UpperCAmelCase_ = self.image_processor(
                _snake_case , return_image_mask=_snake_case , return_codebook_pixels=_snake_case , return_tensors=_snake_case , **_snake_case , )
        if text is not None and images is not None:
            encoding.update(_snake_case)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**_snake_case) , tensor_type=_snake_case)

    def lowerCamelCase ( self : Any , *_snake_case : Optional[Any] , **_snake_case : int):
        """simple docstring"""
        return self.tokenizer.batch_decode(*_snake_case , **_snake_case)

    def lowerCamelCase ( self : Optional[int] , *_snake_case : int , **_snake_case : Dict):
        """simple docstring"""
        return self.tokenizer.decode(*_snake_case , **_snake_case)

    @property
    def lowerCamelCase ( self : str):
        """simple docstring"""
        UpperCAmelCase_ = self.tokenizer.model_input_names
        UpperCAmelCase_ = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def lowerCamelCase ( self : str):
        """simple docstring"""
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _snake_case , )
        return self.image_processor_class

    @property
    def lowerCamelCase ( self : Any):
        """simple docstring"""
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _snake_case , )
        return self.image_processor
51
1
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class _snake_case ( unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Any = tempfile.mkdtemp() __magic_name__ : Optional[int] = BlipImageProcessor() __magic_name__ : Any = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" ) __magic_name__ : Tuple = BlipProcessor(_a , _a ) processor.save_pretrained(self.tmpdirname ) def SCREAMING_SNAKE_CASE ( self , **_a ): return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).tokenizer def SCREAMING_SNAKE_CASE ( self , **_a ): return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).image_processor def SCREAMING_SNAKE_CASE ( self ): shutil.rmtree(self.tmpdirname ) def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Dict = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] __magic_name__ : Optional[int] = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs] return image_inputs def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : int = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __magic_name__ : Any = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) __magic_name__ : str = self.get_image_processor(do_normalize=_a , padding_value=1.0 ) __magic_name__ : int = BlipProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=_a , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , _a ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _a ) def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : List[Any] = self.get_image_processor() __magic_name__ : Dict = self.get_tokenizer() __magic_name__ : Union[str, Any] = BlipProcessor(tokenizer=_a , image_processor=_a ) __magic_name__ : List[str] = self.prepare_image_inputs() __magic_name__ : Dict = image_processor(_a , return_tensors="np" ) __magic_name__ : List[Any] = processor(images=_a , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Dict = self.get_image_processor() __magic_name__ : Tuple = self.get_tokenizer() __magic_name__ : Tuple = BlipProcessor(tokenizer=_a , image_processor=_a ) __magic_name__ : Dict = "lower newer" __magic_name__ : Union[str, Any] = processor(text=_a ) __magic_name__ : Optional[Any] = tokenizer(_a , return_token_type_ids=_a ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : str = self.get_image_processor() __magic_name__ : Tuple = self.get_tokenizer() __magic_name__ : str = BlipProcessor(tokenizer=_a , image_processor=_a ) __magic_name__ : List[Any] = "lower newer" __magic_name__ : int = self.prepare_image_inputs() __magic_name__ : str = processor(text=_a , images=_a ) self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] ) # 
test if it raises when no input is passed with pytest.raises(_a ): processor() def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : int = self.get_image_processor() __magic_name__ : str = self.get_tokenizer() __magic_name__ : Optional[Any] = BlipProcessor(tokenizer=_a , image_processor=_a ) __magic_name__ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __magic_name__ : List[Any] = processor.batch_decode(_a ) __magic_name__ : List[str] = tokenizer.batch_decode(_a ) self.assertListEqual(_a , _a ) def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Dict = self.get_image_processor() __magic_name__ : List[str] = self.get_tokenizer() __magic_name__ : Tuple = BlipProcessor(tokenizer=_a , image_processor=_a ) __magic_name__ : Optional[Any] = "lower newer" __magic_name__ : List[str] = self.prepare_image_inputs() __magic_name__ : int = processor(text=_a , images=_a ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
41
from ...configuration_utils import PretrainedConfig
from ...utils import logging


snake_case : Dict = logging.get_logger(__name__)

snake_case : Optional[int] = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class _snake_case ( snake_case ):
    UpperCamelCase__ = 'donut-swin'

    UpperCamelCase__ = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__( self , _a=224 , _a=4 , _a=3 , _a=96 , _a=[2, 2, 6, 2] , _a=[3, 6, 12, 24] , _a=7 , _a=4.0 , _a=True , _a=0.0 , _a=0.0 , _a=0.1 , _a="gelu" , _a=False , _a=0.02 , _a=1e-5 , **_a , ):
        super().__init__(**_a )
        __magic_name__ : Optional[int] = image_size
        __magic_name__ : Any = patch_size
        __magic_name__ : Tuple = num_channels
        __magic_name__ : Dict = embed_dim
        __magic_name__ : Dict = depths
        __magic_name__ : int = len(_a )
        __magic_name__ : str = num_heads
        __magic_name__ : Tuple = window_size
        __magic_name__ : Dict = mlp_ratio
        __magic_name__ : List[str] = qkv_bias
        __magic_name__ : Any = hidden_dropout_prob
        __magic_name__ : str = attention_probs_dropout_prob
        __magic_name__ : Union[str, Any] = drop_path_rate
        __magic_name__ : List[Any] = hidden_act
        __magic_name__ : List[Any] = use_absolute_embeddings
        __magic_name__ : Union[str, Any] = layer_norm_eps
        __magic_name__ : Optional[Any] = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        __magic_name__ : Tuple = int(embed_dim * 2 ** (len(_a ) - 1) )
41
1
'''simple docstring'''
import requests


def _A ( _lowerCAmelCase , _lowerCAmelCase ):
    """simple docstring"""
    __lowercase ={'Content-Type': 'application/json'}
    __lowercase =requests.post(_lowerCAmelCase , json={'text': message_body} , headers=_lowerCAmelCase )
    if response.status_code != 200:
        __lowercase =(
            'Request to slack returned an error '
            f"""{response.status_code}, the response is:\n{response.text}"""
        )
        raise ValueError(_lowerCAmelCase )


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
166
'''simple docstring'''
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging


lowerCamelCase = logging.get_logger(__name__)

lowerCamelCase = {
    """google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}


class _UpperCamelCase ( A ):
    '''simple docstring'''

    lowerCAmelCase__ = """umt5"""
    lowerCAmelCase__ = ["""past_key_values"""]

    def __init__( self : Optional[int] , _lowerCAmelCase : int=2_5_0_1_1_2 , _lowerCAmelCase : Union[str, Any]=5_1_2 , _lowerCAmelCase : List[Any]=6_4 , _lowerCAmelCase : Optional[Any]=1_0_2_4 , _lowerCAmelCase : Union[str, Any]=8 , _lowerCAmelCase : Any=None , _lowerCAmelCase : Tuple=6 , _lowerCAmelCase : str=3_2 , _lowerCAmelCase : List[str]=1_2_8 , _lowerCAmelCase : Tuple=0.1 , _lowerCAmelCase : Tuple=1e-6 , _lowerCAmelCase : List[Any]=1.0 , _lowerCAmelCase : Union[str, Any]="gated-gelu" , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : int=True , _lowerCAmelCase : Tuple="T5Tokenizer" , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : List[str]=0 , _lowerCAmelCase : Union[str, Any]=1 , _lowerCAmelCase : Any=0 , **_lowerCAmelCase : int , ):
        '''simple docstring'''
        super().__init__(
            is_encoder_decoder=_lowerCAmelCase , tokenizer_class=_lowerCAmelCase , tie_word_embeddings=_lowerCAmelCase , pad_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , **_lowerCAmelCase , )
        __lowercase =vocab_size
        __lowercase =d_model
        __lowercase =d_kv
        __lowercase =d_ff
        __lowercase =num_layers
        __lowercase =(
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        __lowercase =num_heads
        __lowercase =relative_attention_num_buckets
        __lowercase =relative_attention_max_distance
        __lowercase =dropout_rate
        __lowercase =layer_norm_epsilon
        __lowercase =initializer_factor
        __lowercase =feed_forward_proj
        __lowercase =use_cache
        __lowercase =self.feed_forward_proj.split('-')
        __lowercase =act_info[-1]
        __lowercase =act_info[0] == 'gated'
        if len(_lowerCAmelCase) > 1 and act_info[0] != "gated" or len(_lowerCAmelCase) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                '\'gated-gelu\' or \'relu\'')
        if feed_forward_proj == "gated-gelu":
            __lowercase ='gelu_new'

    @property
    def __lowerCamelCase ( self : List[Any]):
        '''simple docstring'''
        return self.d_model

    @property
    def __lowerCamelCase ( self : Optional[Any]):
        '''simple docstring'''
        return self.num_heads

    @property
    def __lowerCamelCase ( self : int):
        '''simple docstring'''
        return self.num_layers


class _UpperCamelCase ( A ):
    '''simple docstring'''

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def __lowerCamelCase ( self : Optional[int]):
        '''simple docstring'''
        __lowercase ={
            'input_ids': {0: 'batch', 1: 'encoder_sequence'},
            'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
        }
        if self.use_past:
            __lowercase ='past_encoder_sequence + sequence'
            __lowercase ={0: 'batch'}
            __lowercase ={0: 'batch', 1: 'past_decoder_sequence + sequence'}
        else:
            __lowercase ={0: 'batch', 1: 'decoder_sequence'}
            __lowercase ={0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            self.fill_with_past_key_values_(_lowerCAmelCase , direction='inputs')
        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def __lowerCamelCase ( self : Optional[Any]):
        '''simple docstring'''
        return 1_3

    @property
    def __lowerCamelCase ( self : int):
        '''simple docstring'''
        return 5e-4
166
1
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier


def snake_case_ (__A : dict ) -> tuple:
    return (data["data"], data["target"])


def snake_case_ (__A : np.ndarray , __A : np.ndarray ) -> XGBClassifier:
    __lowerCAmelCase : List[Any] = XGBClassifier()
    classifier.fit(__A , __A )
    return classifier


def snake_case_ () -> None:
    __lowerCAmelCase : Dict = load_iris()
    __lowerCAmelCase ,__lowerCAmelCase : Optional[Any] = data_handling(__A )
    __lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase : int = train_test_split(
        __A , __A , test_size=0.25
    )
    __lowerCAmelCase : int = iris["""target_names"""]
    # Create an XGBoost Classifier from the training data
    __lowerCAmelCase : Dict = xgboost(__A , __A )
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        __A , __A , __A , display_labels=__A , cmap="""Blues""" , normalize="""true""" , )
    plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
139
import argparse import json import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification def snake_case_ (__A : Optional[Any] ) -> Tuple: __lowerCAmelCase : Optional[int] = SwinConfig() __lowerCAmelCase : List[Any] = swin_name.split("""_""" ) __lowerCAmelCase : Dict = name_split[1] __lowerCAmelCase : Optional[Any] = int(name_split[4] ) __lowerCAmelCase : List[Any] = int(name_split[3][-1] ) if model_size == "tiny": __lowerCAmelCase : List[Any] = 9_6 __lowerCAmelCase : List[Any] = (2, 2, 6, 2) __lowerCAmelCase : Optional[Any] = (3, 6, 1_2, 2_4) elif model_size == "small": __lowerCAmelCase : List[Any] = 9_6 __lowerCAmelCase : Optional[int] = (2, 2, 1_8, 2) __lowerCAmelCase : Optional[int] = (3, 6, 1_2, 2_4) elif model_size == "base": __lowerCAmelCase : List[Any] = 1_2_8 __lowerCAmelCase : Tuple = (2, 2, 1_8, 2) __lowerCAmelCase : int = (4, 8, 1_6, 3_2) else: __lowerCAmelCase : List[Any] = 1_9_2 __lowerCAmelCase : List[str] = (2, 2, 1_8, 2) __lowerCAmelCase : int = (6, 1_2, 2_4, 4_8) if "in22k" in swin_name: __lowerCAmelCase : Dict = 2_1_8_4_1 else: __lowerCAmelCase : Optional[Any] = 1_0_0_0 __lowerCAmelCase : Union[str, Any] = """huggingface/label-files""" __lowerCAmelCase : Any = """imagenet-1k-id2label.json""" __lowerCAmelCase : Any = json.load(open(hf_hub_download(__A , __A , repo_type="""dataset""" ) , """r""" ) ) __lowerCAmelCase : int = {int(__A ): v for k, v in idalabel.items()} __lowerCAmelCase : str = idalabel __lowerCAmelCase : int = {v: k for k, v in idalabel.items()} __lowerCAmelCase : Optional[Any] = img_size __lowerCAmelCase : Optional[Any] = num_classes __lowerCAmelCase : Tuple = embed_dim __lowerCAmelCase : Union[str, Any] = depths __lowerCAmelCase : Optional[Any] = num_heads __lowerCAmelCase : Tuple = window_size return config def snake_case_ (__A : int ) -> Optional[Any]: if "patch_embed.proj" in name: __lowerCAmelCase : Optional[Any] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: __lowerCAmelCase : List[Any] = name.replace("""patch_embed.norm""" , """embeddings.norm""" ) if "layers" in name: __lowerCAmelCase : int = """encoder.""" + name if "attn.proj" in name: __lowerCAmelCase : Tuple = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: __lowerCAmelCase : Optional[Any] = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: __lowerCAmelCase : Dict = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: __lowerCAmelCase : Dict = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: __lowerCAmelCase : List[str] = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: __lowerCAmelCase : str = name.replace("""mlp.fc2""" , """output.dense""" ) if name == "norm.weight": __lowerCAmelCase : Dict = """layernorm.weight""" if name == "norm.bias": __lowerCAmelCase : Optional[int] = """layernorm.bias""" if "head" in name: __lowerCAmelCase : int = name.replace("""head""" , """classifier""" ) else: __lowerCAmelCase : List[str] = """swin.""" + name return name def snake_case_ (__A : List[Any] , __A : str ) -> int: for key in orig_state_dict.copy().keys(): __lowerCAmelCase : Tuple = orig_state_dict.pop(__A ) if "mask" in key: continue elif "qkv" in key: __lowerCAmelCase : Any = key.split(""".""" ) __lowerCAmelCase : Union[str, Any] = int(key_split[1] ) __lowerCAmelCase : Optional[Any] = 
int(key_split[3] ) __lowerCAmelCase : Optional[int] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: __lowerCAmelCase : List[str] = val[:dim, :] __lowerCAmelCase : List[Any] = val[ dim : dim * 2, : ] __lowerCAmelCase : str = val[-dim:, :] else: __lowerCAmelCase : str = val[ :dim ] __lowerCAmelCase : int = val[ dim : dim * 2 ] __lowerCAmelCase : int = val[ -dim: ] else: __lowerCAmelCase : Tuple = val return orig_state_dict def snake_case_ (__A : Union[str, Any] , __A : int ) -> Any: __lowerCAmelCase : List[Any] = timm.create_model(__A , pretrained=__A ) timm_model.eval() __lowerCAmelCase : str = get_swin_config(__A ) __lowerCAmelCase : Any = SwinForImageClassification(__A ) model.eval() __lowerCAmelCase : str = convert_state_dict(timm_model.state_dict() , __A ) model.load_state_dict(__A ) __lowerCAmelCase : str = """http://images.cocodataset.org/val2017/000000039769.jpg""" __lowerCAmelCase : Any = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swin_name.replace("""_""" , """-""" ) ) ) __lowerCAmelCase : List[Any] = Image.open(requests.get(__A , stream=__A ).raw ) __lowerCAmelCase : List[str] = image_processor(images=__A , return_tensors="""pt""" ) __lowerCAmelCase : Tuple = timm_model(inputs["""pixel_values"""] ) __lowerCAmelCase : Dict = model(**__A ).logits assert torch.allclose(__A , __A , atol=1e-3 ) print(f'''Saving model {swin_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__A ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__A ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--swin_name""", default="""swin_tiny_patch4_window7_224""", type=str, help="""Name of the Swin timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) __UpperCAmelCase = parser.parse_args() convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
139
1
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


__lowerCAmelCase = logging.get_logger(__name__)

__lowerCAmelCase = {
    """google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""",
    """google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}


class __a ( snake_case_ ):
    __lowercase : Tuple = """mobilenet_v1"""

    def __init__( self , lowerCAmelCase__=3 , lowerCAmelCase__=224 , lowerCAmelCase__=1.0 , lowerCAmelCase__=8 , lowerCAmelCase__="relu6" , lowerCAmelCase__=True , lowerCAmelCase__=0.9_9_9 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=0.0_0_1 , **lowerCAmelCase__ , ) -> int:
        '''simple docstring'''
        super().__init__(**lowerCAmelCase__ )
        if depth_multiplier <= 0:
            raise ValueError('depth_multiplier must be greater than zero.' )
        lowercase__: List[str] = num_channels
        lowercase__: List[str] = image_size
        lowercase__: Tuple = depth_multiplier
        lowercase__: Union[str, Any] = min_depth
        lowercase__: str = hidden_act
        lowercase__: Union[str, Any] = tf_padding
        lowercase__: Optional[int] = classifier_dropout_prob
        lowercase__: int = initializer_range
        lowercase__: Any = layer_norm_eps


class __a ( snake_case_ ):
    __lowercase : Optional[Any] = version.parse('1.11' )

    @property
    def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
        '''simple docstring'''
        return OrderedDict([('pixel_values', {0: 'batch'})] )

    @property
    def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
        '''simple docstring'''
        if self.task == "image-classification":
            return OrderedDict([('logits', {0: 'batch'})] )
        else:
            return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )

    @property
    def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
        '''simple docstring'''
        return 1E-4
196
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy SCREAMING_SNAKE_CASE__:List[Any] = logging.get_logger(__name__) class snake_case__ ( snake_case_ ): def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ): __a = feature_size __a = sampling_rate __a = padding_value __a = kwargs.pop("padding_side" , "right" ) __a = kwargs.pop("return_attention_mask" , lowerCamelCase ) super().__init__(**lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , ): # If we have a list of dicts, let's convert it in a dict of lists # We do this to allow using this method as a collate_fn function in PyTorch Dataloader if isinstance(lowerCamelCase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ): __a = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`" F" to this method that includes {self.model_input_names[0]}, but you provided" F" {list(processed_features.keys() )}" ) __a = processed_features[self.model_input_names[0]] __a = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(lowerCamelCase ) == 0: if return_attention_mask: __a = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch __a = required_input[0] if isinstance(lowerCamelCase , (list, tuple) ): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. __a = 0 while len(required_input[index] ) == 0: index += 1 if index < len(lowerCamelCase ): __a = required_input[index][0] if return_tensors is None: if is_tf_tensor(lowerCamelCase ): __a = "tf" elif is_torch_tensor(lowerCamelCase ): __a = "pt" elif isinstance(lowerCamelCase , (int, float, list, tuple, np.ndarray) ): __a = "np" else: raise ValueError( F"type of {first_element} unknown: {type(lowerCamelCase )}. " "Should be one of a python, numpy, pytorch or tensorflow object." ) for key, value in processed_features.items(): if isinstance(value[0] , (int, float) ): __a = to_numpy(lowerCamelCase ) else: __a = [to_numpy(lowerCamelCase ) for v in value] # Convert padding_strategy in PaddingStrategy __a = self._get_padding_strategies(padding=lowerCamelCase , max_length=lowerCamelCase ) __a = processed_features[self.model_input_names[0]] __a = len(lowerCamelCase ) if not all(len(lowerCamelCase ) == batch_size for v in processed_features.values() ): raise ValueError("Some items in the output dictionary have a different batch size than others." 
) __a = [] for i in range(lowerCamelCase ): __a = {k: v[i] for k, v in processed_features.items()} # truncation __a = self._truncate( lowerCamelCase , max_length=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , truncation=lowerCamelCase , ) truncated_inputs.append(lowerCamelCase ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length __a = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) __a = PaddingStrategy.MAX_LENGTH __a = {} for i in range(lowerCamelCase ): # padding __a = self._pad( truncated_inputs[i] , max_length=lowerCamelCase , padding_strategy=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , ) for key, value in outputs.items(): if key not in batch_outputs: __a = [] if value.dtype is np.dtype(np.floataa ): __a = value.astype(np.floataa ) batch_outputs[key].append(lowerCamelCase ) return BatchFeature(lowerCamelCase , tensor_type=lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = PaddingStrategy.DO_NOT_PAD , lowerCamelCase = None , lowerCamelCase = None , ): __a = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: __a = len(lowerCamelCase ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): __a = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of __a = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowerCamelCase ) < max_length if return_attention_mask and "attention_mask" not in processed_features: __a = np.ones(len(lowerCamelCase ) , dtype=np.intaa ) if needs_to_be_padded: __a = max_length - len(lowerCamelCase ) if self.padding_side == "right": if return_attention_mask: __a = np.pad( processed_features["attention_mask"] , (0, difference) ) __a = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) __a = np.pad( lowerCamelCase , lowerCamelCase , "constant" , constant_values=self.padding_value ) elif self.padding_side == "left": if return_attention_mask: __a = np.pad( processed_features["attention_mask"] , (difference, 0) ) __a = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) __a = np.pad( lowerCamelCase , lowerCamelCase , "constant" , constant_values=self.padding_value ) else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return processed_features def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , ): if not truncation: return processed_features elif truncation and max_length is None: raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." 
) __a = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): __a = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of __a = len(lowerCamelCase ) > max_length if needs_to_be_truncated: __a = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: __a = processed_features["attention_mask"][:max_length] return processed_features def a__ ( self , lowerCamelCase=False , lowerCamelCase=None ): # Get padding strategy if padding is not False: if padding is True: __a = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(lowerCamelCase , lowerCamelCase ): __a = PaddingStrategy(lowerCamelCase ) elif isinstance(lowerCamelCase , lowerCamelCase ): __a = padding else: __a = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( F"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined" ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use" " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." ) return padding_strategy
261
0
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)


_lowerCAmelCase = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCAmelCase = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCAmelCase = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCAmelCase = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCAmelCase = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
    from .processing_speech_to_text import SpeechaTextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import SpeechaTextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeechaTextForConditionalGeneration,
            TFSpeechaTextModel,
            TFSpeechaTextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechaTextForConditionalGeneration,
            SpeechaTextModel,
            SpeechaTextPreTrainedModel,
        )

else:
    import sys

    _lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
98
'''simple docstring''' import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _SCREAMING_SNAKE_CASE : def __init__( self : str , a__ : Union[str, Any] , a__ : Dict=13 , a__ : List[str]=32 , a__ : List[Any]=2 , a__ : List[str]=3 , a__ : Union[str, Any]=16 , a__ : Dict=[1, 2, 1] , a__ : Optional[Any]=[2, 2, 4] , a__ : List[str]=2 , a__ : Optional[Any]=2.0 , a__ : Union[str, Any]=True , a__ : int=0.0 , a__ : int=0.0 , a__ : Tuple=0.1 , a__ : List[str]="gelu" , a__ : str=False , a__ : Optional[int]=True , a__ : List[Any]=0.02 , a__ : Any=1E-5 , a__ : int=True , a__ : List[Any]=None , a__ : Dict=True , a__ : Optional[int]=10 , a__ : Any=8 , ): __magic_name__ = parent __magic_name__ = batch_size __magic_name__ = image_size __magic_name__ = patch_size __magic_name__ = num_channels __magic_name__ = embed_dim __magic_name__ = depths __magic_name__ = num_heads __magic_name__ = window_size __magic_name__ = mlp_ratio __magic_name__ = qkv_bias __magic_name__ = hidden_dropout_prob __magic_name__ = attention_probs_dropout_prob __magic_name__ = drop_path_rate __magic_name__ = hidden_act __magic_name__ = use_absolute_embeddings __magic_name__ = patch_norm __magic_name__ = layer_norm_eps __magic_name__ = initializer_range __magic_name__ = is_training __magic_name__ = scope __magic_name__ = use_labels __magic_name__ = type_sequence_label_size __magic_name__ = encoder_stride def snake_case__ ( self : List[Any] ): __magic_name__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __magic_name__ = None if self.use_labels: __magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ = self.get_config() return config, pixel_values, labels def snake_case__ ( self : Optional[int] ): return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def snake_case__ ( self : Optional[int] , a__ : Union[str, Any] , a__ : Union[str, Any] , a__ : Optional[int] ): __magic_name__ = SwinvaModel(config=a__ ) model.to(a__ ) model.eval() __magic_name__ = model(a__ ) __magic_name__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) __magic_name__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) 
) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def snake_case__ ( self : Optional[Any] , a__ : Optional[Any] , a__ : str , a__ : int ): __magic_name__ = SwinvaForMaskedImageModeling(config=a__ ) model.to(a__ ) model.eval() __magic_name__ = model(a__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images __magic_name__ = 1 __magic_name__ = SwinvaForMaskedImageModeling(a__ ) model.to(a__ ) model.eval() __magic_name__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __magic_name__ = model(a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def snake_case__ ( self : List[str] , a__ : List[str] , a__ : List[Any] , a__ : Any ): __magic_name__ = self.type_sequence_label_size __magic_name__ = SwinvaForImageClassification(a__ ) model.to(a__ ) model.eval() __magic_name__ = model(a__ , labels=a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def snake_case__ ( self : Optional[Any] ): __magic_name__ = self.prepare_config_and_inputs() __magic_name__ , __magic_name__ , __magic_name__ = config_and_inputs __magic_name__ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class _SCREAMING_SNAKE_CASE ( __a ,__a ,unittest.TestCase ): __SCREAMING_SNAKE_CASE :int = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) __SCREAMING_SNAKE_CASE :Tuple = ( {"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification} if is_torch_available() else {} ) __SCREAMING_SNAKE_CASE :Union[str, Any] = False __SCREAMING_SNAKE_CASE :List[Any] = False __SCREAMING_SNAKE_CASE :Dict = False __SCREAMING_SNAKE_CASE :Union[str, Any] = False def snake_case__ ( self : str ): __magic_name__ = SwinvaModelTester(self ) __magic_name__ = ConfigTester(self , config_class=a__ , embed_dim=37 ) def snake_case__ ( self : Tuple ): self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def snake_case__ ( self : List[Any] ): __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a__ ) @unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' ) def snake_case__ ( self : str ): pass @unittest.skip(reason='''Swinv2 does not use inputs_embeds''' ) def snake_case__ ( self : Union[str, Any] ): pass def snake_case__ ( self : Optional[int] ): __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ = model_class(a__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __magic_name__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a__ , nn.Linear ) ) def snake_case__ ( self : Union[str, Any] ): __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ = model_class(a__ ) __magic_name__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so 
arg_names order is deterministic __magic_name__ = [*signature.parameters.keys()] __magic_name__ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , a__ ) def snake_case__ ( self : int ): __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ = True for model_class in self.all_model_classes: __magic_name__ = True __magic_name__ = False __magic_name__ = True __magic_name__ = model_class(a__ ) model.to(a__ ) model.eval() with torch.no_grad(): __magic_name__ = model(**self._prepare_for_class(a__ , a__ ) ) __magic_name__ = outputs.attentions __magic_name__ = len(self.model_tester.depths ) self.assertEqual(len(a__ ) , a__ ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __magic_name__ = True __magic_name__ = config.window_size**2 __magic_name__ = model_class(a__ ) model.to(a__ ) model.eval() with torch.no_grad(): __magic_name__ = model(**self._prepare_for_class(a__ , a__ ) ) __magic_name__ = outputs.attentions self.assertEqual(len(a__ ) , a__ ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) __magic_name__ = len(a__ ) # Check attention is always last and order is fine __magic_name__ = True __magic_name__ = True __magic_name__ = model_class(a__ ) model.to(a__ ) model.eval() with torch.no_grad(): __magic_name__ = model(**self._prepare_for_class(a__ , a__ ) ) if hasattr(self.model_tester , '''num_hidden_states_types''' ): __magic_name__ = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states __magic_name__ = 2 self.assertEqual(out_len + added_hidden_states , len(a__ ) ) __magic_name__ = outputs.attentions self.assertEqual(len(a__ ) , a__ ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) def snake_case__ ( self : Any , a__ : Dict , a__ : str , a__ : str , a__ : List[Any] ): __magic_name__ = model_class(a__ ) model.to(a__ ) model.eval() with torch.no_grad(): __magic_name__ = model(**self._prepare_for_class(a__ , a__ ) ) __magic_name__ = outputs.hidden_states __magic_name__ = getattr( self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(a__ ) , a__ ) # Swinv2 has a different seq_length __magic_name__ = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __magic_name__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) __magic_name__ = outputs.reshaped_hidden_states self.assertEqual(len(a__ ) , a__ ) __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = reshaped_hidden_states[0].shape __magic_name__ = ( reshaped_hidden_states[0].view(a__ , a__ , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def snake_case__ ( self : List[Any] ): __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: __magic_name__ = True self.check_hidden_states_output(a__ , a__ , a__ , 
a__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __magic_name__ = True self.check_hidden_states_output(a__ , a__ , a__ , a__ ) def snake_case__ ( self : Optional[Any] ): __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ = 3 __magic_name__ = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) __magic_name__ = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __magic_name__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) __magic_name__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: __magic_name__ = True self.check_hidden_states_output(a__ , a__ , a__ , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __magic_name__ = True self.check_hidden_states_output(a__ , a__ , a__ , (padded_height, padded_width) ) def snake_case__ ( self : str ): __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*a__ ) def snake_case__ ( self : Dict ): __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a__ ) @slow def snake_case__ ( self : Any ): for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ = SwinvaModel.from_pretrained(a__ ) self.assertIsNotNone(a__ ) def snake_case__ ( self : List[str] ): __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ = _config_zero_init(a__ ) for model_class in self.all_model_classes: __magic_name__ = model_class(config=a__ ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @require_vision @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): @cached_property def snake_case__ ( self : Optional[Any] ): return ( AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ) if is_vision_available() else None ) @slow def snake_case__ ( self : Optional[int] ): __magic_name__ = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to( a__ ) __magic_name__ = self.default_image_processor __magic_name__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) __magic_name__ = image_processor(images=a__ , return_tensors='''pt''' ).to(a__ ) # forward pass with torch.no_grad(): __magic_name__ = model(**a__ ) # verify the logits __magic_name__ = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , a__ ) __magic_name__ = torch.tensor([-0.3_947, -0.4_306, 0.0_026] ).to(a__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1E-4 ) )
98
1
class SubArray:
    """Maximum sub-array sum via dynamic programming (Kadane-style)."""

    def __init__(self, arr: str):
        # input is a comma-separated string of integers
        self.array = arr.split(",")

    def solve_sub_array(self) -> int:
        # sum_value[i]: best sum of a sub-array ending at index i
        # rear[i]: best sub-array sum seen anywhere in array[0..i]
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            # use last results for better performance - dynamic programming
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the result is:", re))
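A quick usage sketch for the restored SubArray class above; the input string and expected sum are illustrative values, not part of the dataset row:

# Usage sketch (illustrative values):
demo = SubArray("-2,1,-3,4,-1,2,1,-5,4")
assert demo.solve_sub_array() == 6  # best sub-array is [4, -1, 2, 1]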
184
from manim import * class __magic_name__ ( lowerCAmelCase_ ): def __magic_name__ ( self ) -> Any: '''simple docstring''' __a =Rectangle(height=0.5 , width=0.5 ) __a =Rectangle(height=0.25 , width=0.25 ) __a =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) __a =[mem.copy() for i in range(6 )] __a =[mem.copy() for i in range(6 )] __a =VGroup(*__snake_case ).arrange(__snake_case , buff=0 ) __a =VGroup(*__snake_case ).arrange(__snake_case , buff=0 ) __a =VGroup(__snake_case , __snake_case ).arrange(__snake_case , buff=0 ) __a =Text('CPU' , font_size=24 ) __a =Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case ) cpu.move_to([-2.5, -0.5, 0] ) self.add(__snake_case ) __a =[mem.copy() for i in range(4 )] __a =VGroup(*__snake_case ).arrange(__snake_case , buff=0 ) __a =Text('GPU' , font_size=24 ) __a =Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case ) gpu.move_to([-1, -1, 0] ) self.add(__snake_case ) __a =[mem.copy() for i in range(6 )] __a =VGroup(*__snake_case ).arrange(__snake_case , buff=0 ) __a =Text('Model' , font_size=24 ) __a =Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case ) model.move_to([3, -1.0, 0] ) self.add(__snake_case ) __a =[] __a =[] __a =[] for i, rect in enumerate(__snake_case ): rect.set_stroke(__snake_case ) __a =Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__snake_case , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__snake_case ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(model_cpu_arr[0] , direction=__snake_case , buff=0.0 ) else: cpu_target.next_to(model_cpu_arr[i - 1] , direction=__snake_case , buff=0.0 ) self.add(__snake_case ) model_cpu_arr.append(__snake_case ) self.add(*__snake_case , *__snake_case , *__snake_case ) __a =[mem.copy() for i in range(6 )] __a =VGroup(*__snake_case ).arrange(__snake_case , buff=0 ) __a =Text('Loaded Checkpoint' , font_size=24 ) __a =Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case ) checkpoint.move_to([3, 0.5, 0] ) self.add(__snake_case ) __a =[] __a =[] for i, rect in enumerate(__snake_case ): __a =fill.copy().set_fill(__snake_case , opacity=0.7 ) target.move_to(__snake_case ) ckpt_arr.append(__snake_case ) __a =target.copy() if i < 5: cpu_target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.move_to(cpu_right_col_base[i - 5] ) ckpt_cpu_arr.append(__snake_case ) self.add(*__snake_case , *__snake_case ) __a =Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) __a =MarkupText( f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(__snake_case , __snake_case ) __a =MarkupText( f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , ) blue_text.next_to(__snake_case , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(__snake_case ) __a =MarkupText( f'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.' 
, font_size=24 , ) step_a.move_to([2, 2, 0] ) __a =[meta_mem.copy() for i in range(6 )] __a =[meta_mem.copy() for i in range(6 )] __a =VGroup(*__snake_case ).arrange(__snake_case , buff=0 ) __a =VGroup(*__snake_case ).arrange(__snake_case , buff=0 ) __a =VGroup(__snake_case , __snake_case ).arrange(__snake_case , buff=0 ) __a =Text('Disk' , font_size=24 ) __a =Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case ) disk.move_to([-4.0, -1.25, 0] ) self.play(Write(__snake_case , run_time=3 ) , Write(__snake_case , run_time=1 ) , Create(__snake_case , run_time=1 ) ) __a =[] for i, rect in enumerate(__snake_case ): __a =rect.copy() target.generate_target() target.target.move_to(disk_left_col_base[i] ).scale(0.5 ) animations.append(MoveToTarget(__snake_case , run_time=1.5 ) ) self.play(*__snake_case ) self.play(FadeOut(__snake_case ) ) __a =MarkupText(f'Then, the checkpoint is removed from memory\nthrough garbage collection.' , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(__snake_case , run_time=3 ) ) self.play( FadeOut(__snake_case , __snake_case , *__snake_case , *__snake_case ) , ) self.wait()
218
0
class EditDistance:
    """Minimum edit distance between two strings, top-down and bottom-up DP."""

    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
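A minimal check of the EditDistance class above; the word pair and expected distance are illustrative, not taken from the dataset:

# Usage sketch (illustrative values):
solver = EditDistance()
# "kitten" -> "sitten" -> "sittin" -> "sitting": 3 edits
assert solver.min_dist_bottom_up("kitten", "sitting") == 3
assert solver.min_dist_top_down("kitten", "sitting") == 3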
358
import argparse from collections import defaultdict def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]: """simple docstring""" snake_case__ : Dict = f"""{file}_{class_name}_{test_name}""" done_test[_id] += 1 with open(__lowerCAmelCase , '''r''' ) as f: snake_case__ : str = f.readlines() snake_case__ : List[str] = f"""class {class_name}(""" snake_case__ : Any = f"""{4 * ' '}def {test_name}(""" snake_case__ : Optional[int] = f"""{8 * ' '}{correct_line.split()[0]}""" snake_case__ : List[str] = f"""{16 * ' '}{correct_line.split()[0]}""" snake_case__ : Any = False snake_case__ : Optional[int] = False snake_case__ : Optional[Any] = False snake_case__ : int = False snake_case__ : Union[str, Any] = 0 snake_case__ : str = 0 snake_case__ : Union[str, Any] = [] for line in lines: if line.startswith(__lowerCAmelCase ): snake_case__ : Optional[Any] = True elif in_class and line.startswith(__lowerCAmelCase ): snake_case__ : Optional[int] = True elif in_class and in_func and (line.startswith(__lowerCAmelCase ) or line.startswith(__lowerCAmelCase )): snake_case__ : int = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: snake_case__ : Tuple = True if in_class and in_func and in_line: if ")" not in line: continue else: snake_case__ : List[Any] = True if in_class and in_func and in_line and insert_line: new_lines.append(f"""{spaces * ' '}{correct_line}""" ) snake_case__ : Optional[int] = False else: new_lines.append(__lowerCAmelCase ) with open(__lowerCAmelCase , '''w''' ) as f: for line in new_lines: f.write(__lowerCAmelCase ) def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase=None ) -> Dict: """simple docstring""" if fail is not None: with open(__lowerCAmelCase , '''r''' ) as f: snake_case__ : Optional[int] = {l.strip() for l in f.readlines()} else: snake_case__ : Tuple = None with open(__lowerCAmelCase , '''r''' ) as f: snake_case__ : Optional[int] = f.readlines() snake_case__ : Tuple = defaultdict(__lowerCAmelCase ) for line in correct_lines: snake_case__ , snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = line.split(''';''' ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": A__ = argparse.ArgumentParser() parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''') parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None) A__ = parser.parse_args() main(args.correct_filename, args.fail_filename)
44
0
import math import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from .attention_processor import Attention from .embeddings import get_timestep_embedding from .modeling_utils import ModelMixin class __A ( a , a ): """simple docstring""" @register_to_config def __init__( self , lowerCamelCase__ = 128 , lowerCamelCase__ = 256 , lowerCamelCase__ = 2_000.0 , lowerCamelCase__ = 768 , lowerCamelCase__ = 12 , lowerCamelCase__ = 12 , lowerCamelCase__ = 64 , lowerCamelCase__ = 2048 , lowerCamelCase__ = 0.1 , ): """simple docstring""" super().__init__() __UpperCamelCase : str =nn.Sequential( nn.Linear(lowerCamelCase__ , d_model * 4 , bias=lowerCamelCase__ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=lowerCamelCase__ ) , nn.SiLU() , ) __UpperCamelCase : Any =nn.Embedding(lowerCamelCase__ , lowerCamelCase__ ) __UpperCamelCase : Optional[int] =False __UpperCamelCase : Dict =nn.Linear(lowerCamelCase__ , lowerCamelCase__ , bias=lowerCamelCase__ ) __UpperCamelCase : Optional[Any] =nn.Dropout(p=lowerCamelCase__ ) __UpperCamelCase : int =nn.ModuleList() for lyr_num in range(lowerCamelCase__ ): # FiLM conditional T5 decoder __UpperCamelCase : Tuple =DecoderLayer(d_model=lowerCamelCase__ , d_kv=lowerCamelCase__ , num_heads=lowerCamelCase__ , d_ff=lowerCamelCase__ , dropout_rate=lowerCamelCase__ ) self.decoders.append(lowerCamelCase__ ) __UpperCamelCase : List[str] =TaLayerNorm(lowerCamelCase__ ) __UpperCamelCase : Optional[int] =nn.Dropout(p=lowerCamelCase__ ) __UpperCamelCase : Optional[int] =nn.Linear(lowerCamelCase__ , lowerCamelCase__ , bias=lowerCamelCase__ ) def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" __UpperCamelCase : Optional[int] =torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) ) return mask.unsqueeze(-3 ) def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : List[str] =decoder_input_tokens.shape assert decoder_noise_time.shape == (batch,) # decoder_noise_time is in [0, 1), so rescale to expected timing range. __UpperCamelCase : Tuple =get_timestep_embedding( decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype ) __UpperCamelCase : Union[str, Any] =self.conditioning_emb(lowerCamelCase__ ).unsqueeze(1 ) assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4) __UpperCamelCase : Optional[int] =decoder_input_tokens.shape[1] # If we want to use relative positions for audio context, we can just offset # this sequence by the length of encodings_and_masks. __UpperCamelCase : List[Any] =torch.broadcast_to( torch.arange(lowerCamelCase__ , device=decoder_input_tokens.device ) , (batch, seq_length) , ) __UpperCamelCase : Any =self.position_encoding(lowerCamelCase__ ) __UpperCamelCase : Any =self.continuous_inputs_projection(lowerCamelCase__ ) inputs += position_encodings __UpperCamelCase : Optional[Any] =self.dropout(lowerCamelCase__ ) # decoder: No padding present. __UpperCamelCase : Any =torch.ones( decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype ) # Translate encoding masks to encoder-decoder masks. 
__UpperCamelCase : str =[(x, self.encoder_decoder_mask(lowerCamelCase__ , lowerCamelCase__ )) for x, y in encodings_and_masks] # cross attend style: concat encodings __UpperCamelCase : Any =torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 ) __UpperCamelCase : List[Any] =torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 ) for lyr in self.decoders: __UpperCamelCase : Optional[Any] =lyr( lowerCamelCase__ , conditioning_emb=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , encoder_attention_mask=lowerCamelCase__ , )[0] __UpperCamelCase : Tuple =self.decoder_norm(lowerCamelCase__ ) __UpperCamelCase : Tuple =self.post_dropout(lowerCamelCase__ ) __UpperCamelCase : Dict =self.spec_out(lowerCamelCase__ ) return spec_out class __A ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=1E-6 ): """simple docstring""" super().__init__() __UpperCamelCase : Optional[int] =nn.ModuleList() # cond self attention: layer 0 self.layer.append( TaLayerSelfAttentionCond(d_model=lowerCamelCase__ , d_kv=lowerCamelCase__ , num_heads=lowerCamelCase__ , dropout_rate=lowerCamelCase__ ) ) # cross attention: layer 1 self.layer.append( TaLayerCrossAttention( d_model=lowerCamelCase__ , d_kv=lowerCamelCase__ , num_heads=lowerCamelCase__ , dropout_rate=lowerCamelCase__ , layer_norm_epsilon=lowerCamelCase__ , ) ) # Film Cond MLP + dropout: last layer self.layer.append( TaLayerFFCond(d_model=lowerCamelCase__ , d_ff=lowerCamelCase__ , dropout_rate=lowerCamelCase__ , layer_norm_epsilon=lowerCamelCase__ ) ) def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , ): """simple docstring""" __UpperCamelCase : Optional[Any] =self.layer[0]( lowerCamelCase__ , conditioning_emb=lowerCamelCase__ , attention_mask=lowerCamelCase__ , ) if encoder_hidden_states is not None: __UpperCamelCase : str =torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to( encoder_hidden_states.dtype ) __UpperCamelCase : int =self.layer[1]( lowerCamelCase__ , key_value_states=lowerCamelCase__ , attention_mask=lowerCamelCase__ , ) # Apply Film Conditional Feed Forward layer __UpperCamelCase : Union[str, Any] =self.layer[-1](lowerCamelCase__ , lowerCamelCase__ ) return (hidden_states,) class __A ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" super().__init__() __UpperCamelCase : Any =TaLayerNorm(lowerCamelCase__ ) __UpperCamelCase : Tuple =TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCamelCase__ ) __UpperCamelCase : Union[str, Any] =Attention(query_dim=lowerCamelCase__ , heads=lowerCamelCase__ , dim_head=lowerCamelCase__ , out_bias=lowerCamelCase__ , scale_qk=lowerCamelCase__ ) __UpperCamelCase : List[str] =nn.Dropout(lowerCamelCase__ ) def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , ): """simple docstring""" __UpperCamelCase : int =self.layer_norm(lowerCamelCase__ ) if conditioning_emb is not None: __UpperCamelCase : str =self.FiLMLayer(lowerCamelCase__ , lowerCamelCase__ ) # Self-attention block __UpperCamelCase : int =self.attention(lowerCamelCase__ ) __UpperCamelCase : Any =hidden_states + self.dropout(lowerCamelCase__ ) return hidden_states class __A ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase__ , 
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" super().__init__() __UpperCamelCase : Tuple =Attention(query_dim=lowerCamelCase__ , heads=lowerCamelCase__ , dim_head=lowerCamelCase__ , out_bias=lowerCamelCase__ , scale_qk=lowerCamelCase__ ) __UpperCamelCase : List[str] =TaLayerNorm(lowerCamelCase__ , eps=lowerCamelCase__ ) __UpperCamelCase : Any =nn.Dropout(lowerCamelCase__ ) def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , ): """simple docstring""" __UpperCamelCase : List[Any] =self.layer_norm(lowerCamelCase__ ) __UpperCamelCase : Tuple =self.attention( lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , attention_mask=attention_mask.squeeze(1 ) , ) __UpperCamelCase : int =hidden_states + self.dropout(lowerCamelCase__ ) return layer_output class __A ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" super().__init__() __UpperCamelCase : Any =TaDenseGatedActDense(d_model=lowerCamelCase__ , d_ff=lowerCamelCase__ , dropout_rate=lowerCamelCase__ ) __UpperCamelCase : Tuple =TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCamelCase__ ) __UpperCamelCase : Dict =TaLayerNorm(lowerCamelCase__ , eps=lowerCamelCase__ ) __UpperCamelCase : Union[str, Any] =nn.Dropout(lowerCamelCase__ ) def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__=None ): """simple docstring""" __UpperCamelCase : List[str] =self.layer_norm(lowerCamelCase__ ) if conditioning_emb is not None: __UpperCamelCase : List[str] =self.film(lowerCamelCase__ , lowerCamelCase__ ) __UpperCamelCase : Union[str, Any] =self.DenseReluDense(lowerCamelCase__ ) __UpperCamelCase : Any =hidden_states + self.dropout(lowerCamelCase__ ) return hidden_states class __A ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" super().__init__() __UpperCamelCase : List[str] =nn.Linear(lowerCamelCase__ , lowerCamelCase__ , bias=lowerCamelCase__ ) __UpperCamelCase : Optional[int] =nn.Linear(lowerCamelCase__ , lowerCamelCase__ , bias=lowerCamelCase__ ) __UpperCamelCase : List[Any] =nn.Linear(lowerCamelCase__ , lowerCamelCase__ , bias=lowerCamelCase__ ) __UpperCamelCase : List[Any] =nn.Dropout(lowerCamelCase__ ) __UpperCamelCase : Any =NewGELUActivation() def __lowercase ( self , lowerCamelCase__ ): """simple docstring""" __UpperCamelCase : Any =self.act(self.wi_a(lowerCamelCase__ ) ) __UpperCamelCase : Union[str, Any] =self.wi_a(lowerCamelCase__ ) __UpperCamelCase : Any =hidden_gelu * hidden_linear __UpperCamelCase : List[str] =self.dropout(lowerCamelCase__ ) __UpperCamelCase : Optional[Any] =self.wo(lowerCamelCase__ ) return hidden_states class __A ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__=1E-6 ): """simple docstring""" super().__init__() __UpperCamelCase : Any =nn.Parameter(torch.ones(lowerCamelCase__ ) ) __UpperCamelCase : str =eps def __lowercase ( self , lowerCamelCase__ ): """simple docstring""" __UpperCamelCase : Any =hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=lowerCamelCase__ ) __UpperCamelCase : Dict =hidden_states * torch.rsqrt(variance + self.variance_epsilon ) # convert into half-precision if necessary if self.weight.dtype in [torch.floataa, torch.bfloataa]: __UpperCamelCase : List[str] =hidden_states.to(self.weight.dtype ) return self.weight * hidden_states class __A ( 
nn.Module ): """simple docstring""" def __lowercase ( self , lowerCamelCase__ ): """simple docstring""" return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(lowerCamelCase__ , 3.0 )) )) class __A ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" super().__init__() __UpperCamelCase : Optional[int] =nn.Linear(lowerCamelCase__ , out_features * 2 , bias=lowerCamelCase__ ) def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" __UpperCamelCase : Optional[int] =self.scale_bias(lowerCamelCase__ ) __UpperCamelCase , __UpperCamelCase : Optional[int] =torch.chunk(lowerCamelCase__ , 2 , -1 ) __UpperCamelCase : int =x * (1 + scale) + shift return x
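The TaFiLMLayer at the end of the cell above applies feature-wise linear modulation (FiLM): a conditioning embedding is projected to per-feature (scale, shift) pairs and the hidden states are transformed as x * (1 + scale) + shift. A minimal standalone sketch of that mechanism follows; the dimensions are hypothetical and unrelated to any checkpoint:

import torch
from torch import nn


class MiniFiLM(nn.Module):
    # Minimal FiLM sketch: project conditioning to per-feature scale and shift.
    def __init__(self, cond_features: int, out_features: int):
        super().__init__()
        self.scale_bias = nn.Linear(cond_features, out_features * 2, bias=False)

    def forward(self, x: torch.Tensor, cond: torch.Tensor) -> torch.Tensor:
        scale, shift = torch.chunk(self.scale_bias(cond), 2, dim=-1)
        return x * (1 + scale) + shift


# Illustrative shapes only:
film = MiniFiLM(cond_features=16, out_features=8)
x = torch.randn(2, 10, 8)     # (batch, seq, features)
cond = torch.randn(2, 1, 16)  # broadcast over the sequence dimension
assert film(x, cond).shape == x.shape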
71
import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 snake_case_ : List[Any] = data_utils.TransfoXLTokenizer snake_case_ : int = data_utils.TransfoXLCorpus snake_case_ : List[Any] = data_utils snake_case_ : int = data_utils def A (__A : Dict , __A : List[Any] , __A : Union[str, Any] , __A : Tuple ) -> Union[str, Any]: """simple docstring""" if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(__A , '''rb''' ) as fp: UpperCAmelCase_ = pickle.load(__A , encoding='''latin1''' ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) UpperCAmelCase_ = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''pretrained_vocab_file'''] print(F"""Save vocabulary to {pytorch_vocab_dump_path}""" ) UpperCAmelCase_ = corpus.vocab.__dict__ torch.save(__A , __A ) UpperCAmelCase_ = corpus.__dict__ corpus_dict_no_vocab.pop('''vocab''' , __A ) UpperCAmelCase_ = pytorch_dump_folder_path + '''/''' + CORPUS_NAME print(F"""Save dataset to {pytorch_dataset_dump_path}""" ) torch.save(__A , __A ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model UpperCAmelCase_ = os.path.abspath(__A ) UpperCAmelCase_ = os.path.abspath(__A ) print(F"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" ) # Initialise PyTorch model if transfo_xl_config_file == "": UpperCAmelCase_ = TransfoXLConfig() else: UpperCAmelCase_ = TransfoXLConfig.from_json_file(__A ) print(F"""Building PyTorch model from configuration: {config}""" ) UpperCAmelCase_ = TransfoXLLMHeadModel(__A ) UpperCAmelCase_ = load_tf_weights_in_transfo_xl(__A , __A , __A ) # Save pytorch-model UpperCAmelCase_ = os.path.join(__A , __A ) UpperCAmelCase_ = os.path.join(__A , __A ) print(F"""Save PyTorch model to {os.path.abspath(__A )}""" ) torch.save(model.state_dict() , __A ) print(F"""Save configuration file to {os.path.abspath(__A )}""" ) with open(__A , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": snake_case_ : List[str] = argparse.ArgumentParser() parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the folder to store the PyTorch model or dataset/vocab.", ) parser.add_argument( "--tf_checkpoint_path", default="", type=str, help="An optional path to a TensorFlow checkpoint path to be converted.", ) parser.add_argument( "--transfo_xl_config_file", default="", type=str, help=( "An optional config json file corresponding to the pre-trained BERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--transfo_xl_dataset_file", default="", type=str, help="An optional dataset file to be converted in a vocabulary.", ) snake_case_ : int = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
51
0
'''simple docstring''' from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = OrderedDict( [ # Base model mapping ("albert", "FlaxAlbertModel"), ("bart", "FlaxBartModel"), ("beit", "FlaxBeitModel"), ("bert", "FlaxBertModel"), ("big_bird", "FlaxBigBirdModel"), ("blenderbot", "FlaxBlenderbotModel"), ("blenderbot-small", "FlaxBlenderbotSmallModel"), ("clip", "FlaxCLIPModel"), ("distilbert", "FlaxDistilBertModel"), ("electra", "FlaxElectraModel"), ("gpt-sw3", "FlaxGPT2Model"), ("gpt2", "FlaxGPT2Model"), ("gpt_neo", "FlaxGPTNeoModel"), ("gptj", "FlaxGPTJModel"), ("longt5", "FlaxLongT5Model"), ("marian", "FlaxMarianModel"), ("mbart", "FlaxMBartModel"), ("mt5", "FlaxMT5Model"), ("opt", "FlaxOPTModel"), ("pegasus", "FlaxPegasusModel"), ("regnet", "FlaxRegNetModel"), ("resnet", "FlaxResNetModel"), ("roberta", "FlaxRobertaModel"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"), ("roformer", "FlaxRoFormerModel"), ("t5", "FlaxT5Model"), ("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"), ("vit", "FlaxViTModel"), ("wav2vec2", "FlaxWav2Vec2Model"), ("whisper", "FlaxWhisperModel"), ("xglm", "FlaxXGLMModel"), ("xlm-roberta", "FlaxXLMRobertaModel"), ] ) lowerCAmelCase_ = OrderedDict( [ # Model for pre-training mapping ("albert", "FlaxAlbertForPreTraining"), ("bart", "FlaxBartForConditionalGeneration"), ("bert", "FlaxBertForPreTraining"), ("big_bird", "FlaxBigBirdForPreTraining"), ("electra", "FlaxElectraForPreTraining"), ("longt5", "FlaxLongT5ForConditionalGeneration"), ("mbart", "FlaxMBartForConditionalGeneration"), ("mt5", "FlaxMT5ForConditionalGeneration"), ("roberta", "FlaxRobertaForMaskedLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"), ("roformer", "FlaxRoFormerForMaskedLM"), ("t5", "FlaxT5ForConditionalGeneration"), ("wav2vec2", "FlaxWav2Vec2ForPreTraining"), ("whisper", "FlaxWhisperForConditionalGeneration"), ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"), ] ) lowerCAmelCase_ = OrderedDict( [ # Model for Masked LM mapping ("albert", "FlaxAlbertForMaskedLM"), ("bart", "FlaxBartForConditionalGeneration"), ("bert", "FlaxBertForMaskedLM"), ("big_bird", "FlaxBigBirdForMaskedLM"), ("distilbert", "FlaxDistilBertForMaskedLM"), ("electra", "FlaxElectraForMaskedLM"), ("mbart", "FlaxMBartForConditionalGeneration"), ("roberta", "FlaxRobertaForMaskedLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"), ("roformer", "FlaxRoFormerForMaskedLM"), ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"), ] ) lowerCAmelCase_ = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("bart", "FlaxBartForConditionalGeneration"), ("blenderbot", "FlaxBlenderbotForConditionalGeneration"), ("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"), ("encoder-decoder", "FlaxEncoderDecoderModel"), ("longt5", "FlaxLongT5ForConditionalGeneration"), ("marian", "FlaxMarianMTModel"), ("mbart", "FlaxMBartForConditionalGeneration"), ("mt5", "FlaxMT5ForConditionalGeneration"), ("pegasus", "FlaxPegasusForConditionalGeneration"), ("t5", "FlaxT5ForConditionalGeneration"), ] ) lowerCAmelCase_ = OrderedDict( [ # Model for Image-classsification ("beit", "FlaxBeitForImageClassification"), ("regnet", "FlaxRegNetForImageClassification"), ("resnet", "FlaxResNetForImageClassification"), ("vit", "FlaxViTForImageClassification"), ] ) lowerCAmelCase_ = OrderedDict( [ 
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"), ] ) lowerCAmelCase_ = OrderedDict( [ # Model for Causal LM mapping ("bart", "FlaxBartForCausalLM"), ("bert", "FlaxBertForCausalLM"), ("big_bird", "FlaxBigBirdForCausalLM"), ("electra", "FlaxElectraForCausalLM"), ("gpt-sw3", "FlaxGPT2LMHeadModel"), ("gpt2", "FlaxGPT2LMHeadModel"), ("gpt_neo", "FlaxGPTNeoForCausalLM"), ("gptj", "FlaxGPTJForCausalLM"), ("opt", "FlaxOPTForCausalLM"), ("roberta", "FlaxRobertaForCausalLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"), ("xglm", "FlaxXGLMForCausalLM"), ("xlm-roberta", "FlaxXLMRobertaForCausalLM"), ] ) lowerCAmelCase_ = OrderedDict( [ # Model for Sequence Classification mapping ("albert", "FlaxAlbertForSequenceClassification"), ("bart", "FlaxBartForSequenceClassification"), ("bert", "FlaxBertForSequenceClassification"), ("big_bird", "FlaxBigBirdForSequenceClassification"), ("distilbert", "FlaxDistilBertForSequenceClassification"), ("electra", "FlaxElectraForSequenceClassification"), ("mbart", "FlaxMBartForSequenceClassification"), ("roberta", "FlaxRobertaForSequenceClassification"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"), ("roformer", "FlaxRoFormerForSequenceClassification"), ("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"), ] ) lowerCAmelCase_ = OrderedDict( [ # Model for Question Answering mapping ("albert", "FlaxAlbertForQuestionAnswering"), ("bart", "FlaxBartForQuestionAnswering"), ("bert", "FlaxBertForQuestionAnswering"), ("big_bird", "FlaxBigBirdForQuestionAnswering"), ("distilbert", "FlaxDistilBertForQuestionAnswering"), ("electra", "FlaxElectraForQuestionAnswering"), ("mbart", "FlaxMBartForQuestionAnswering"), ("roberta", "FlaxRobertaForQuestionAnswering"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"), ("roformer", "FlaxRoFormerForQuestionAnswering"), ("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"), ] ) lowerCAmelCase_ = OrderedDict( [ # Model for Token Classification mapping ("albert", "FlaxAlbertForTokenClassification"), ("bert", "FlaxBertForTokenClassification"), ("big_bird", "FlaxBigBirdForTokenClassification"), ("distilbert", "FlaxDistilBertForTokenClassification"), ("electra", "FlaxElectraForTokenClassification"), ("roberta", "FlaxRobertaForTokenClassification"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"), ("roformer", "FlaxRoFormerForTokenClassification"), ("xlm-roberta", "FlaxXLMRobertaForTokenClassification"), ] ) lowerCAmelCase_ = OrderedDict( [ # Model for Multiple Choice mapping ("albert", "FlaxAlbertForMultipleChoice"), ("bert", "FlaxBertForMultipleChoice"), ("big_bird", "FlaxBigBirdForMultipleChoice"), ("distilbert", "FlaxDistilBertForMultipleChoice"), ("electra", "FlaxElectraForMultipleChoice"), ("roberta", "FlaxRobertaForMultipleChoice"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"), ("roformer", "FlaxRoFormerForMultipleChoice"), ("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"), ] ) lowerCAmelCase_ = OrderedDict( [ ("bert", "FlaxBertForNextSentencePrediction"), ] ) lowerCAmelCase_ = OrderedDict( [ ("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"), ("whisper", "FlaxWhisperForConditionalGeneration"), ] ) lowerCAmelCase_ = OrderedDict( [ ("whisper", "FlaxWhisperForAudioClassification"), ] ) lowerCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) lowerCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) lowerCAmelCase_ = 
_LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) lowerCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) lowerCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) lowerCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) lowerCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) lowerCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) lowerCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) lowerCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) lowerCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) lowerCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) lowerCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) lowerCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class lowerCamelCase ( _BaseAutoModelClass ): snake_case_ = FLAX_MODEL_MAPPING lowerCAmelCase_ = auto_class_update(FlaxAutoModel) class lowerCamelCase ( _BaseAutoModelClass ): snake_case_ = FLAX_MODEL_FOR_PRETRAINING_MAPPING lowerCAmelCase_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining") class lowerCamelCase ( _BaseAutoModelClass ): snake_case_ = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING lowerCAmelCase_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling") class lowerCamelCase ( _BaseAutoModelClass ): snake_case_ = FLAX_MODEL_FOR_MASKED_LM_MAPPING lowerCAmelCase_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling") class lowerCamelCase ( _BaseAutoModelClass ): snake_case_ = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING lowerCAmelCase_ = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base" ) class lowerCamelCase ( _BaseAutoModelClass ): snake_case_ = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING lowerCAmelCase_ = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="sequence classification" ) class lowerCamelCase ( _BaseAutoModelClass ): snake_case_ = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING lowerCAmelCase_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering") class lowerCamelCase ( _BaseAutoModelClass ): snake_case_ = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING lowerCAmelCase_ = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="token classification" ) class lowerCamelCase ( _BaseAutoModelClass ): snake_case_ = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING lowerCAmelCase_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice") class lowerCamelCase ( _BaseAutoModelClass ): snake_case_ = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING lowerCAmelCase_ = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction" ) class lowerCamelCase ( _BaseAutoModelClass ): snake_case_ = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING lowerCAmelCase_ = auto_class_update( FlaxAutoModelForImageClassification, head_doc="image classification" ) class lowerCamelCase ( _BaseAutoModelClass ): snake_case_ = 
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING lowerCAmelCase_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling") class lowerCamelCase ( _BaseAutoModelClass ): snake_case_ = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING lowerCAmelCase_ = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling" )
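A hedged usage sketch for the Flax auto classes registered above; the model id is illustrative, requires Flax to be installed, and the call fetches weights from the Hub on first use:

from transformers import FlaxAutoModelForSequenceClassification

# The lazy mapping resolves config type "bert" to FlaxBertForSequenceClassification.
model = FlaxAutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
print(type(model).__name__)  # FlaxBertForSequenceClassification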
367
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu lowerCAmelCase_ = False class lowerCamelCase ( unittest.TestCase ): def _lowerCamelCase ( self ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _lowerCamelCase ( self ) -> List[Any]: return 12 @property def _lowerCamelCase ( self ) -> Dict: return 12 @property def _lowerCamelCase ( self ) -> List[Any]: return 32 @property def _lowerCamelCase ( self ) -> List[Any]: torch.manual_seed(0 ) snake_case = VQModel( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=3, num_vq_embeddings=self.num_embed, vq_embed_dim=3, ) return model @property def _lowerCamelCase ( self ) -> List[Any]: snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def _lowerCamelCase ( self ) -> Tuple: torch.manual_seed(0 ) snake_case = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModel(lowercase_ ) @property def _lowerCamelCase ( self ) -> str: torch.manual_seed(0 ) snake_case = 12 snake_case = 12 snake_case = { 'attention_bias': True, 'cross_attention_dim': 32, 'attention_head_dim': height * width, 'num_attention_heads': 1, 'num_vector_embeds': self.num_embed, 'num_embeds_ada_norm': self.num_embeds_ada_norm, 'norm_num_groups': 32, 'sample_size': width, 'activation_fn': 'geglu-approximate', } snake_case = TransformeraDModel(**lowercase_ ) return model def _lowerCamelCase ( self ) -> Tuple: snake_case = 'cpu' snake_case = self.dummy_vqvae snake_case = self.dummy_text_encoder snake_case = self.dummy_tokenizer snake_case = self.dummy_transformer snake_case = VQDiffusionScheduler(self.num_embed ) snake_case = LearnedClassifierFreeSamplingEmbeddings(learnable=lowercase_ ) snake_case = VQDiffusionPipeline( vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, ) snake_case = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) snake_case = 'teddy bear playing in the pool' snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' ) snake_case = output.images snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipe( [prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0] snake_case = image[0, -3:, -3:, -1] snake_case = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) snake_case = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - 
expected_slice ).max() < 1E-2 def _lowerCamelCase ( self ) -> Optional[Any]: snake_case = 'cpu' snake_case = self.dummy_vqvae snake_case = self.dummy_text_encoder snake_case = self.dummy_tokenizer snake_case = self.dummy_transformer snake_case = VQDiffusionScheduler(self.num_embed ) snake_case = LearnedClassifierFreeSamplingEmbeddings( learnable=lowercase_, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length ) snake_case = VQDiffusionPipeline( vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, ) snake_case = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) snake_case = 'teddy bear playing in the pool' snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' ) snake_case = output.images snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipe( [prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0] snake_case = image[0, -3:, -3:, -1] snake_case = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) snake_case = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class lowerCamelCase ( unittest.TestCase ): def _lowerCamelCase ( self ) -> int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCamelCase ( self ) -> str: snake_case = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' ) snake_case = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' ) snake_case = pipeline.to(lowercase_ ) pipeline.set_progress_bar_config(disable=lowercase_ ) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipeline( 'teddy bear playing in the pool', num_images_per_prompt=1, generator=lowercase_, output_type='np', ) snake_case = output.images[0] assert image.shape == (256, 256, 3) assert np.abs(expected_image - image ).max() < 2.0
332
0
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaImgaImgPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class lowercase ( _lowerCamelCase , unittest.TestCase ): """simple docstring""" UpperCAmelCase = KandinskyVaaImgaImgPipeline UpperCAmelCase = ["""image_embeds""", """negative_image_embeds""", """image"""] UpperCAmelCase = [ """image_embeds""", """negative_image_embeds""", """image""", ] UpperCAmelCase = [ """generator""", """height""", """width""", """strength""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] UpperCAmelCase = False @property def _snake_case ( self ) -> Tuple: return 32 @property def _snake_case ( self ) -> Tuple: return 32 @property def _snake_case ( self ) -> Optional[Any]: return self.time_input_dim @property def _snake_case ( self ) -> List[Any]: return self.time_input_dim * 4 @property def _snake_case ( self ) -> Tuple: return 100 @property def _snake_case ( self ) -> Optional[Any]: torch.manual_seed(0 ) _UpperCAmelCase : List[Any] = { """in_channels""": 4, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } _UpperCAmelCase : Tuple = UNetaDConditionModel(**a_ ) return model @property def _snake_case ( self ) -> Tuple: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def _snake_case ( self ) -> Any: torch.manual_seed(0 ) _UpperCAmelCase : Optional[Any] = VQModel(**self.dummy_movq_kwargs ) return model def _snake_case ( self ) -> Optional[Any]: _UpperCAmelCase : Optional[int] = self.dummy_unet _UpperCAmelCase : List[Any] = self.dummy_movq _UpperCAmelCase : Optional[int] = { """num_train_timesteps""": 1_000, """beta_schedule""": """linear""", """beta_start""": 0.0_0085, """beta_end""": 0.012, """clip_sample""": False, """set_alpha_to_one""": False, """steps_offset""": 0, """prediction_type""": """epsilon""", """thresholding""": False, } _UpperCAmelCase : Dict = DDIMScheduler(**a_ ) _UpperCAmelCase : int = { """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def _snake_case ( self ,a_ ,a_=0 ) -> Dict: _UpperCAmelCase : 
Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(a_ ) ).to(a_ ) _UpperCAmelCase : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to( a_ ) # create init_image _UpperCAmelCase : Optional[Any] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(a_ ) ).to(a_ ) _UpperCAmelCase : str = image.cpu().permute(0 ,2 ,3 ,1 )[0] _UpperCAmelCase : Tuple = Image.fromarray(np.uinta(a_ ) ).convert("""RGB""" ).resize((256, 256) ) if str(a_ ).startswith("""mps""" ): _UpperCAmelCase : Optional[int] = torch.manual_seed(a_ ) else: _UpperCAmelCase : Tuple = torch.Generator(device=a_ ).manual_seed(a_ ) _UpperCAmelCase : Optional[int] = { """image""": init_image, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 10, """guidance_scale""": 7.0, """strength""": 0.2, """output_type""": """np""", } return inputs def _snake_case ( self ) -> Dict: _UpperCAmelCase : Dict = """cpu""" _UpperCAmelCase : int = self.get_dummy_components() _UpperCAmelCase : Optional[Any] = self.pipeline_class(**a_ ) _UpperCAmelCase : List[Any] = pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) _UpperCAmelCase : List[str] = pipe(**self.get_dummy_inputs(a_ ) ) _UpperCAmelCase : Optional[int] = output.images _UpperCAmelCase : Tuple = pipe( **self.get_dummy_inputs(a_ ) ,return_dict=a_ ,)[0] _UpperCAmelCase : Tuple = image[0, -3:, -3:, -1] _UpperCAmelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _UpperCAmelCase : Optional[int] = np.array( [0.619_9778, 0.6398_4406, 0.4614_5785, 0.6294_4984, 0.562_2215, 0.4730_6132, 0.4744_1456, 0.460_7606, 0.4871_9263] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class lowercase ( unittest.TestCase ): """simple docstring""" def _snake_case ( self ) -> List[str]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self ) -> Dict: _UpperCAmelCase : Union[str, Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_img2img_frog.npy""" ) _UpperCAmelCase : Optional[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) _UpperCAmelCase : Optional[int] = """A red cartoon frog, 4k""" _UpperCAmelCase : Dict = KandinskyVaaPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-prior""" ,torch_dtype=torch.floataa ) pipe_prior.to(a_ ) _UpperCAmelCase : Optional[Any] = KandinskyVaaImgaImgPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-decoder""" ,torch_dtype=torch.floataa ) _UpperCAmelCase : Any = pipeline.to(a_ ) pipeline.set_progress_bar_config(disable=a_ ) _UpperCAmelCase : Dict = torch.Generator(device="""cpu""" ).manual_seed(0 ) _UpperCAmelCase ,_UpperCAmelCase : Any = pipe_prior( a_ ,generator=a_ ,num_inference_steps=5 ,negative_prompt="""""" ,).to_tuple() _UpperCAmelCase : List[str] = pipeline( image=a_ ,image_embeds=a_ ,negative_image_embeds=a_ ,generator=a_ ,num_inference_steps=100 ,height=768 ,width=768 ,strength=0.2 
,output_type="""np""" ,) _UpperCAmelCase : Optional[int] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(a_ ,a_ )
215
import argparse

import torch

from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
215
1
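The pipeline test in this row pins a 3x3 corner of the generated image against reference values and accepts it within an absolute tolerance. A minimal, self-contained sketch of that assertion idiom (the arrays below are placeholders, not real pipeline outputs):

import numpy as np

image = np.zeros((1, 64, 64, 3), dtype=np.float32)   # stand-in for pipeline output (NHWC)
image_slice = image[0, -3:, -3:, -1]                 # bottom-right 3x3 patch of the last channel
expected_slice = np.zeros(9, dtype=np.float32)       # values recorded once from a known-good run
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2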
'''simple docstring''' # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.test_utils import execute_subprocess_async def UpperCamelCase_ ( A__ : str=None ): '''simple docstring''' if subparsers is not None: lowerCAmelCase_ : Tuple = subparsers.add_parser("""test""" ) else: lowerCAmelCase_ : List[str] = argparse.ArgumentParser("""Accelerate test command""" ) parser.add_argument( """--config_file""" , default=A__ , help=( """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """ """location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """ """such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """ """with 'huggingface'.""" ) , ) if subparsers is not None: parser.set_defaults(func=A__ ) return parser def UpperCamelCase_ ( A__ : Any ): '''simple docstring''' lowerCAmelCase_ : List[Any] = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["""test_utils""", """scripts""", """test_script.py"""] ) if args.config_file is None: lowerCAmelCase_ : int = script_name else: lowerCAmelCase_ : Union[str, Any] = f'--config_file={args.config_file} {script_name}' lowerCAmelCase_ : Optional[int] = ["""accelerate-launch"""] + test_args.split() lowerCAmelCase_ : List[str] = execute_subprocess_async(A__ , env=os.environ.copy() ) if result.returncode == 0: print("""Test is a success! You are ready for your distributed training!""" ) def UpperCamelCase_ ( ): '''simple docstring''' lowerCAmelCase_ : Tuple = test_command_parser() lowerCAmelCase_ : str = parser.parse_args() test_command(A__ ) if __name__ == "__main__": main()
89
'''simple docstring''' from pathlib import Path import cva import numpy as np from matplotlib import pyplot as plt def UpperCamelCase_ ( A__ : np.ndarray , A__ : np.ndarray , A__ : np.ndarray , A__ : int , A__ : int ): '''simple docstring''' lowerCAmelCase_ : List[Any] = cva.getAffineTransform(A__ , A__ ) return cva.warpAffine(A__ , A__ , (rows, cols) ) if __name__ == "__main__": # read original image __A : Dict = cva.imread( str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg") ) # turn image in gray scale value __A : List[Any] = cva.cvtColor(image, cva.COLOR_BGR2GRAY) # get image shape __A , __A : Dict = gray_img.shape # set different points to rotate image __A : List[str] = np.array([[50, 50], [200, 50], [50, 200]], np.floataa) __A : Tuple = np.array([[10, 100], [200, 50], [100, 250]], np.floataa) __A : List[Any] = np.array([[50, 50], [150, 50], [120, 200]], np.floataa) __A : Optional[Any] = np.array([[10, 100], [80, 50], [180, 250]], np.floataa) # add all rotated images in a list __A : Optional[Any] = [ gray_img, get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), ] # plot different image rotations __A : Dict = plt.figure(1) __A : Optional[Any] = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"] for i, image in enumerate(images): plt.subplot(2, 2, i + 1), plt.imshow(image, "gray") plt.title(titles[i]) plt.axis("off") plt.subplots_adjust(left=0.0, bottom=0.0_5, right=1.0, top=0.9_5) plt.show()
89
1
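The rotation helper in this row is built on cv2.getAffineTransform, which solves for the 2x3 matrix mapping three source points onto three destination points, followed by cv2.warpAffine. A self-contained sketch on a synthetic image; note that warpAffine's dsize argument is (width, height), so passing (rows, cols) as in the snippet only coincides on square images like the Lena sample:

import cv2  # the snippet's cva alias stands for cv2
import numpy as np

img = np.zeros((200, 300), dtype=np.uint8)                 # rows=200, cols=300
cv2.rectangle(img, (50, 50), (120, 100), 255, -1)          # something visible to warp

pts_src = np.array([[50, 50], [200, 50], [50, 150]], np.float32)
pts_dst = np.array([[10, 100], [200, 50], [100, 180]], np.float32)

matrix = cv2.getAffineTransform(pts_src, pts_dst)          # 2x3 affine matrix
rows, cols = img.shape
warped = cv2.warpAffine(img, matrix, (cols, rows))         # dsize is (width, height)
print(warped.shape)                                        # (200, 300)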
'''simple docstring''' import inspect import os import re from transformers.configuration_utils import PretrainedConfig from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py __lowerCamelCase = '''src/transformers''' # This is to make sure the transformers module imported is the one in the repo. __lowerCamelCase = direct_transformers_import(PATH_TO_TRANSFORMERS) __lowerCamelCase = transformers.models.auto.configuration_auto.CONFIG_MAPPING __lowerCamelCase = { # used to compute the property `self.chunk_length` '''EncodecConfig''': ['''overlap'''], # used as `self.bert_model = BertModel(config, ...)` '''DPRConfig''': True, # not used in modeling files, but it's an important information '''FSMTConfig''': ['''langs'''], # used internally in the configuration class file '''GPTNeoConfig''': ['''attention_types'''], # used internally in the configuration class file '''EsmConfig''': ['''is_folding_model'''], # used during training (despite we don't have training script for these models yet) '''Mask2FormerConfig''': ['''ignore_value'''], # `ignore_value` used during training (despite we don't have training script for these models yet) # `norm` used in conversion script (despite not using in the modeling file) '''OneFormerConfig''': ['''ignore_value''', '''norm'''], # used during preprocessing and collation, see `collating_graphormer.py` '''GraphormerConfig''': ['''spatial_pos_max'''], # used internally in the configuration class file '''T5Config''': ['''feed_forward_proj'''], # used internally in the configuration class file # `tokenizer_class` get default value `T5Tokenizer` intentionally '''MT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''], '''UMT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''], # used internally in the configuration class file '''LongT5Config''': ['''feed_forward_proj'''], # used internally in the configuration class file '''SwitchTransformersConfig''': ['''feed_forward_proj'''], # having default values other than `1e-5` - we can't fix them without breaking '''BioGptConfig''': ['''layer_norm_eps'''], # having default values other than `1e-5` - we can't fix them without breaking '''GLPNConfig''': ['''layer_norm_eps'''], # having default values other than `1e-5` - we can't fix them without breaking '''SegformerConfig''': ['''layer_norm_eps'''], # having default values other than `1e-5` - we can't fix them without breaking '''CvtConfig''': ['''layer_norm_eps'''], # having default values other than `1e-5` - we can't fix them without breaking '''PerceiverConfig''': ['''layer_norm_eps'''], # used internally to calculate the feature size '''InformerConfig''': ['''num_static_real_features''', '''num_time_features'''], # used internally to calculate the feature size '''TimeSeriesTransformerConfig''': ['''num_static_real_features''', '''num_time_features'''], # used internally to calculate the feature size '''AutoformerConfig''': ['''num_static_real_features''', '''num_time_features'''], # used internally to calculate `mlp_dim` '''SamVisionConfig''': ['''mlp_ratio'''], # For (head) training, but so far not implemented '''ClapAudioConfig''': ['''num_classes'''], # Not used, but providing useful information to users '''SpeechT5HifiGanConfig''': ['''sampling_rate'''], } # TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure SPECIAL_CASES_TO_ALLOW.update( { 
'''CLIPSegConfig''': True, '''DeformableDetrConfig''': True, '''DetaConfig''': True, '''DinatConfig''': True, '''DonutSwinConfig''': True, '''EfficientFormerConfig''': True, '''FSMTConfig''': True, '''JukeboxConfig''': True, '''LayoutLMv2Config''': True, '''MaskFormerSwinConfig''': True, '''MT5Config''': True, '''NatConfig''': True, '''OneFormerConfig''': True, '''PerceiverConfig''': True, '''RagConfig''': True, '''SpeechT5Config''': True, '''SwinConfig''': True, '''Swin2SRConfig''': True, '''Swinv2Config''': True, '''SwitchTransformersConfig''': True, '''TableTransformerConfig''': True, '''TapasConfig''': True, '''TransfoXLConfig''': True, '''UniSpeechConfig''': True, '''UniSpeechSatConfig''': True, '''WavLMConfig''': True, '''WhisperConfig''': True, # TODO: @Arthur (for `alignment_head` and `alignment_layer`) '''JukeboxPriorConfig''': True, # TODO: @Younes (for `is_decoder`) '''Pix2StructTextConfig''': True, } ) def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Union[str, Any]: A_ = False for attribute in attributes: for modeling_source in source_strings: # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)` if ( F'''config.{attribute}''' in modeling_source or F'''getattr(config, "{attribute}"''' in modeling_source or F'''getattr(self.config, "{attribute}"''' in modeling_source ): A_ = True # Deal with multi-line cases elif ( re.search( rF'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"''', UpperCAmelCase__, ) is not None ): A_ = True # `SequenceSummary` is called with `SequenceSummary(config)` elif attribute in [ "summary_type", "summary_use_proj", "summary_activation", "summary_last_dropout", "summary_proj_to_labels", "summary_first_dropout", ]: if "SequenceSummary" in modeling_source: A_ = True if attribute_used: break if attribute_used: break # common and important attributes, even if they do not always appear in the modeling files A_ = [ """bos_index""", """eos_index""", """pad_index""", """unk_index""", """mask_index""", """image_size""", """use_cache""", """out_features""", """out_indices""", ] A_ = ["""encoder_no_repeat_ngram_size"""] # Special cases to be allowed A_ = True if not attribute_used: A_ = False for attribute in attributes: # Allow if the default value in the configuration class is different from the one in `PretrainedConfig` if attribute in ["is_encoder_decoder"] and default_value is True: A_ = True elif attribute in ["tie_word_embeddings"] and default_value is False: A_ = True # Allow cases without checking the default value in the configuration class elif attribute in attributes_to_allow + attributes_used_in_generation: A_ = True elif attribute.endswith("""_token_id""" ): A_ = True # configuration class specific cases if not case_allowed: A_ = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [] ) A_ = allowed_cases is True or attribute in allowed_cases return attribute_used or case_allowed def UpperCAmelCase__ ( UpperCAmelCase__ ) -> str: A_ = dict(inspect.signature(config_class.__init__ ).parameters ) A_ = [x for x in list(signature.keys() ) if x not in ["""self""", """kwargs"""]] A_ = [signature[param].default for param in parameter_names] # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long # as one variant is used, the test should pass A_ = {} if len(config_class.attribute_map ) > 0: A_ = {v: k for k, v in config_class.attribute_map.items()} # Get the path to modeling 
source files A_ = inspect.getsourcefile(UpperCAmelCase__ ) A_ = os.path.dirname(UpperCAmelCase__ ) # Let's check against all frameworks: as long as one framework uses an attribute, we are good. A_ = [os.path.join(UpperCAmelCase__, UpperCAmelCase__ ) for fn in os.listdir(UpperCAmelCase__ ) if fn.startswith("""modeling_""" )] # Get the source code strings A_ = [] for path in modeling_paths: if os.path.isfile(UpperCAmelCase__ ): with open(UpperCAmelCase__ ) as fp: modeling_sources.append(fp.read() ) A_ = [] for config_param, default_value in zip(UpperCAmelCase__, UpperCAmelCase__ ): # `attributes` here is all the variant names for `config_param` A_ = [config_param] # some configuration classes have non-empty `attribute_map`, and both names could be used in the # corresponding modeling files. As long as one of them appears, it is fine. if config_param in reversed_attribute_map: attributes.append(reversed_attribute_map[config_param] ) if not check_attribute_being_used(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ): unused_attributes.append(attributes[0] ) return sorted(UpperCAmelCase__ ) def UpperCAmelCase__ ( ) -> List[str]: A_ = {} for _config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in _config_class.__module__: continue # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.) A_ = [ cls for name, cls in inspect.getmembers( inspect.getmodule(_config_class ), lambda UpperCAmelCase__ : inspect.isclass(UpperCAmelCase__ ) and issubclass(UpperCAmelCase__, UpperCAmelCase__ ) and inspect.getmodule(UpperCAmelCase__ ) == inspect.getmodule(_config_class ), ) ] for config_class in config_classes_in_module: A_ = check_config_attributes_being_used(UpperCAmelCase__ ) if len(UpperCAmelCase__ ) > 0: A_ = unused_attributes if len(UpperCAmelCase__ ) > 0: A_ = """The following configuration classes contain unused attributes in the corresponding modeling files:\n""" for name, attributes in configs_with_unused_attributes.items(): error += F'''{name}: {attributes}\n''' raise ValueError(UpperCAmelCase__ ) if __name__ == "__main__": check_config_attributes()
162
'''simple docstring''' import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor __lowerCamelCase = logging.get_logger(__name__) class A__ ( CLIPImageProcessor ): def __init__( self , *args , **kwargs ) -> None: '''simple docstring''' warnings.warn( """The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use CLIPImageProcessor instead.""" , FutureWarning , ) super().__init__(*args , **kwargs )
162
1
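The checker in this row hinges on inspect.signature to enumerate each configuration class's __init__ parameters and their defaults before grepping the modeling sources for usages. The core step in isolation (DemoConfig is a made-up stand-in):

import inspect

class DemoConfig:
    def __init__(self, hidden_size=64, num_layers=2, layer_norm_eps=1e-5, **kwargs):
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.layer_norm_eps = layer_norm_eps

signature = dict(inspect.signature(DemoConfig.__init__).parameters)
parameter_names = [x for x in signature if x not in ("self", "kwargs")]
defaults = [signature[name].default for name in parameter_names]
print(list(zip(parameter_names, defaults)))
# [('hidden_size', 64), ('num_layers', 2), ('layer_norm_eps', 1e-05)]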
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFXLMRobertaModel @require_tf @require_sentencepiece @require_tokenizers class __magic_name__ ( unittest.TestCase ): '''simple docstring''' @slow def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = TFXLMRobertaModel.from_pretrained("""jplu/tf-xlm-roberta-base""" ) lowerCamelCase = { """input_ids""": tf.convert_to_tensor([[0, 2_646, 10_269, 83, 99_942, 2]] , dtype=tf.intaa ), # "My dog is cute" """attention_mask""": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ), } lowerCamelCase = model(_a )["""last_hidden_state"""] lowerCamelCase = tf.TensorShape((1, 6, 768) ) self.assertEqual(output.shape , _a ) # compare the actual values for a slice. lowerCamelCase = tf.convert_to_tensor( [ [ [0.0_681_762, 0.10_894_451, 0.06_772_504], [-0.06_423_668, 0.02_366_615, 0.04_329_344], [-0.06_057_295, 0.09_974_135, -0.00_070_584], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
168
"""simple docstring""" import argparse import math import traceback import dateutil.parser as date_parser import requests def a__ ( snake_case__ ) -> Optional[Any]: lowerCamelCase = {} lowerCamelCase = job["""started_at"""] lowerCamelCase = job["""completed_at"""] lowerCamelCase = date_parser.parse(snake_case__ ) lowerCamelCase = date_parser.parse(snake_case__ ) lowerCamelCase = round((end_datetime - start_datetime).total_seconds() / 60.0 ) lowerCamelCase = start lowerCamelCase = end lowerCamelCase = duration_in_min return job_info def a__ ( snake_case__ , snake_case__=None ) -> Optional[Any]: lowerCamelCase = None if token is not None: lowerCamelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": F'Bearer {token}'} lowerCamelCase = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100' lowerCamelCase = requests.get(snake_case__ , headers=snake_case__ ).json() lowerCamelCase = {} try: job_time.update({job["""name"""]: extract_time_from_single_job(snake_case__ ) for job in result["""jobs"""]} ) lowerCamelCase = math.ceil((result["""total_count"""] - 1_00) / 1_00 ) for i in range(snake_case__ ): lowerCamelCase = requests.get(url + F'&page={i + 2}' , headers=snake_case__ ).json() job_time.update({job["""name"""]: extract_time_from_single_job(snake_case__ ) for job in result["""jobs"""]} ) return job_time except Exception: print(F'Unknown error, could not fetch links:\n{traceback.format_exc()}' ) return {} if __name__ == "__main__": lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""") lowerCAmelCase : Any = parser.parse_args() lowerCAmelCase : Optional[int] = get_job_time(args.workflow_run_id) lowerCAmelCase : Dict = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True)) for k, v in job_time.items(): print(F"""{k}: {v['duration']}""")
168
1
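The CI-timing script in this row reduces each GitHub Actions job to whole minutes with dateutil. The parse-and-round step in isolation (timestamps below are made up):

import dateutil.parser as date_parser

start = date_parser.parse("2023-05-01T10:00:00Z")
end = date_parser.parse("2023-05-01T10:41:00Z")
duration_in_min = round((end - start).total_seconds() / 60.0)
print(duration_in_min)  # 41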
'''simple docstring''' import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def _A ( A__ ): """simple docstring""" __lowercase , __lowercase = image.size __lowercase , __lowercase = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 __lowercase = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) __lowercase = np.array(A__ ).astype(np.floataa ) / 2_5_5.0 __lowercase = image[None].transpose(0 , 3 , 1 , 2 ) __lowercase = torch.from_numpy(A__ ) return 2.0 * image - 1.0 class lowercase_ (lowerCamelCase__ ): """simple docstring""" def __init__( self : Union[str, Any] ,lowercase__ : VQModel ,lowercase__ : UNetaDModel ,lowercase__ : Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ] ,): super().__init__() self.register_modules(vqvae=lowercase__ ,unet=lowercase__ ,scheduler=lowercase__ ) @torch.no_grad() def __call__( self : Dict ,lowercase__ : Union[torch.Tensor, PIL.Image.Image] = None ,lowercase__ : Optional[int] = 1 ,lowercase__ : Optional[int] = 1_0_0 ,lowercase__ : Optional[float] = 0.0 ,lowercase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,lowercase__ : Optional[str] = "pil" ,lowercase__ : bool = True ,): if isinstance(lowercase__ ,PIL.Image.Image ): __lowercase = 1 elif isinstance(lowercase__ ,torch.Tensor ): __lowercase = image.shape[0] else: raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(lowercase__ )}" ) if isinstance(lowercase__ ,PIL.Image.Image ): __lowercase = preprocess(lowercase__ ) __lowercase , __lowercase = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image __lowercase = (batch_size, self.unet.config.in_channels // 2, height, width) __lowercase = next(self.unet.parameters() ).dtype __lowercase = randn_tensor(lowercase__ ,generator=lowercase__ ,device=self.device ,dtype=lowercase__ ) __lowercase = image.to(device=self.device ,dtype=lowercase__ ) # set timesteps and move to the correct device self.scheduler.set_timesteps(lowercase__ ,device=self.device ) __lowercase = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler __lowercase = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] __lowercase = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) __lowercase = {} if accepts_eta: __lowercase = eta for t in self.progress_bar(lowercase__ ): # concat latents and low resolution image in the channel dimension. 
__lowercase = torch.cat([latents, image] ,dim=1 ) __lowercase = self.scheduler.scale_model_input(lowercase__ ,lowercase__ ) # predict the noise residual __lowercase = self.unet(lowercase__ ,lowercase__ ).sample # compute the previous noisy sample x_t -> x_t-1 __lowercase = self.scheduler.step(lowercase__ ,lowercase__ ,lowercase__ ,**lowercase__ ).prev_sample # decode the image latents with the VQVAE __lowercase = self.vqvae.decode(lowercase__ ).sample __lowercase = torch.clamp(lowercase__ ,-1.0 ,1.0 ) __lowercase = image / 2 + 0.5 __lowercase = image.cpu().permute(0 ,2 ,3 ,1 ).numpy() if output_type == "pil": __lowercase = self.numpy_to_pil(lowercase__ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowercase__ )
104
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowercase__ = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = ["YolosFeatureExtractor"] lowercase__ = ["YolosImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = [ "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST", "YolosForObjectDetection", "YolosModel", "YolosPreTrainedModel", ] if TYPE_CHECKING: from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_yolos import YolosFeatureExtractor from .image_processing_yolos import YolosImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_yolos import ( YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST, YolosForObjectDetection, YolosModel, YolosPreTrainedModel, ) else: import sys lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
151
0
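The preprocess helper at the top of the upscaler pipeline in this row floors the image size to a multiple of 32, converts to NCHW float, and rescales pixels to [-1, 1]. A standalone sketch of that normalization (Image.LANCZOS is assumed to match the snippet's PIL_INTERPOLATION['lanczos']):

import numpy as np
import torch
from PIL import Image

image = Image.new("RGB", (130, 70))                         # arbitrary input size
w, h = (x - x % 32 for x in image.size)                     # floor to multiples of 32 -> (128, 64)
image = image.resize((w, h), resample=Image.LANCZOS)
arr = np.array(image).astype(np.float32) / 255.0            # HWC in [0, 1]
tensor = torch.from_numpy(arr[None].transpose(0, 3, 1, 2))  # NCHW
tensor = 2.0 * tensor - 1.0                                 # rescale to [-1, 1]
print(tensor.shape)                                         # torch.Size([1, 3, 64, 128])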
import os import tempfile import unittest from transformers import NezhaConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, ) from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST class snake_case__ : """simple docstring""" def __init__( self , __lowercase , __lowercase=1_3 , __lowercase=7 , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=9_9 , __lowercase=3_2 , __lowercase=5 , __lowercase=4 , __lowercase=3_7 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=1_2_8 , __lowercase=3_2 , __lowercase=1_6 , __lowercase=2 , __lowercase=0.0_2 , __lowercase=3 , __lowercase=4 , __lowercase=None , ) -> Dict: """simple docstring""" a__ : List[str] = parent a__ : Tuple = batch_size a__ : List[Any] = seq_length a__ : Tuple = is_training a__ : Dict = use_input_mask a__ : str = use_token_type_ids a__ : List[Any] = use_labels a__ : str = vocab_size a__ : int = hidden_size a__ : List[Any] = num_hidden_layers a__ : str = num_attention_heads a__ : Optional[int] = intermediate_size a__ : Optional[Any] = hidden_act a__ : Optional[Any] = hidden_dropout_prob a__ : List[str] = attention_probs_dropout_prob a__ : List[Any] = max_position_embeddings a__ : List[str] = type_vocab_size a__ : Any = type_sequence_label_size a__ : List[str] = initializer_range a__ : Any = num_labels a__ : str = num_choices a__ : int = scope def SCREAMING_SNAKE_CASE__( self ) -> Optional[Any]: """simple docstring""" a__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a__ : Optional[int] = None if self.use_input_mask: a__ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) a__ : List[str] = None if self.use_token_type_ids: a__ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a__ : Any = None a__ : List[Any] = None a__ : Optional[int] = None if self.use_labels: a__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a__ : str = ids_tensor([self.batch_size] , self.num_choices ) a__ : Optional[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]: """simple docstring""" return NezhaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , 
) def SCREAMING_SNAKE_CASE__( self ) -> Optional[int]: """simple docstring""" ( ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ) : Optional[int] = self.prepare_config_and_inputs() a__ : Dict = True a__ : Optional[int] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) a__ : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> List[str]: """simple docstring""" a__ : Any = NezhaModel(config=__lowercase ) model.to(__lowercase ) model.eval() a__ : str = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase ) a__ : Optional[Any] = model(__lowercase , token_type_ids=__lowercase ) a__ : Optional[Any] = model(__lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> str: """simple docstring""" a__ : Any = True a__ : Optional[Any] = NezhaModel(__lowercase ) model.to(__lowercase ) model.eval() a__ : Union[str, Any] = model( __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , ) a__ : str = model( __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , encoder_hidden_states=__lowercase , ) a__ : Tuple = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Union[str, Any]: """simple docstring""" a__ : List[Any] = NezhaForMaskedLM(config=__lowercase ) model.to(__lowercase ) model.eval() a__ : Any = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Optional[Any]: """simple docstring""" a__ : List[str] = NezhaForNextSentencePrediction(config=__lowercase ) model.to(__lowercase ) model.eval() a__ : Dict = model( __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Dict: """simple docstring""" a__ : int = NezhaForPreTraining(config=__lowercase ) model.to(__lowercase ) model.eval() a__ : str = model( __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , next_sentence_label=__lowercase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) 
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Dict: """simple docstring""" a__ : Optional[Any] = NezhaForQuestionAnswering(config=__lowercase ) model.to(__lowercase ) model.eval() a__ : Dict = model( __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , start_positions=__lowercase , end_positions=__lowercase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Any: """simple docstring""" a__ : Optional[int] = self.num_labels a__ : str = NezhaForSequenceClassification(__lowercase ) model.to(__lowercase ) model.eval() a__ : List[str] = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Any: """simple docstring""" a__ : Union[str, Any] = self.num_labels a__ : Tuple = NezhaForTokenClassification(config=__lowercase ) model.to(__lowercase ) model.eval() a__ : Optional[Any] = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Optional[int]: """simple docstring""" a__ : Union[str, Any] = self.num_choices a__ : Dict = NezhaForMultipleChoice(config=__lowercase ) model.to(__lowercase ) model.eval() a__ : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a__ : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a__ : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a__ : Any = model( __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE__( self ) -> Tuple: """simple docstring""" a__ : str = self.prepare_config_and_inputs() ( ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ) : Tuple = config_and_inputs a__ : Optional[int] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class snake_case__ (A__ , A__ , A__ , unittest.TestCase ): """simple docstring""" __lowerCAmelCase :Dict = ( ( NezhaModel, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, ) if is_torch_available() else () ) __lowerCAmelCase :int = ( { "feature-extraction": NezhaModel, "fill-mask": NezhaForMaskedLM, "question-answering": NezhaForQuestionAnswering, "text-classification": NezhaForSequenceClassification, "token-classification": NezhaForTokenClassification, "zero-shot": NezhaForSequenceClassification, } if is_torch_available() else {} ) 
__lowerCAmelCase :Tuple = True def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase=False ) -> Optional[int]: """simple docstring""" a__ : Dict = super()._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase ) if return_labels: if model_class in get_values(__lowercase ): a__ : str = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowercase ) a__ : Dict = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowercase ) return inputs_dict def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]: """simple docstring""" a__ : Optional[Any] = NezhaModelTester(self ) a__ : Tuple = ConfigTester(self , config_class=__lowercase , hidden_size=3_7 ) def SCREAMING_SNAKE_CASE__( self ) -> Any: """simple docstring""" self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE__( self ) -> Dict: """simple docstring""" a__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowercase ) def SCREAMING_SNAKE_CASE__( self ) -> int: """simple docstring""" a__ : Dict = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*__lowercase ) def SCREAMING_SNAKE_CASE__( self ) -> Optional[int]: """simple docstring""" ( ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ) : List[Any] = self.model_tester.prepare_config_and_inputs_for_decoder() a__ : List[Any] = None self.model_tester.create_and_check_model_as_decoder( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) def SCREAMING_SNAKE_CASE__( self ) -> int: """simple docstring""" a__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowercase ) def SCREAMING_SNAKE_CASE__( self ) -> Dict: """simple docstring""" a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__lowercase ) def SCREAMING_SNAKE_CASE__( self ) -> int: """simple docstring""" a__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*__lowercase ) def SCREAMING_SNAKE_CASE__( self ) -> Dict: """simple docstring""" a__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__lowercase ) def SCREAMING_SNAKE_CASE__( self ) -> List[str]: """simple docstring""" a__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowercase ) def SCREAMING_SNAKE_CASE__( self ) -> str: """simple docstring""" a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__lowercase ) def SCREAMING_SNAKE_CASE__( self ) -> Optional[int]: """simple docstring""" a__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__lowercase ) @slow def SCREAMING_SNAKE_CASE__( self ) -> Optional[Any]: """simple docstring""" for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ : Tuple = NezhaModel.from_pretrained(__lowercase ) self.assertIsNotNone(__lowercase ) @slow @require_torch_gpu def SCREAMING_SNAKE_CASE__( self ) -> Dict: """simple docstring""" a__ , a__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in 
self.all_model_classes: # NezhaForMultipleChoice behaves incorrectly in JIT environments. if model_class == NezhaForMultipleChoice: return a__ : List[Any] = True a__ : Tuple = model_class(config=__lowercase ) a__ : Optional[Any] = self._prepare_for_class(__lowercase , __lowercase ) a__ : str = torch.jit.trace( __lowercase , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(__lowercase , os.path.join(__lowercase , """bert.pt""" ) ) a__ : List[Any] = torch.jit.load(os.path.join(__lowercase , """bert.pt""" ) , map_location=__lowercase ) loaded(inputs_dict["""input_ids"""].to(__lowercase ) , inputs_dict["""attention_mask"""].to(__lowercase ) ) @require_torch class snake_case__ (unittest.TestCase ): """simple docstring""" @slow def SCREAMING_SNAKE_CASE__( self ) -> List[str]: """simple docstring""" a__ : List[str] = NezhaModel.from_pretrained("""sijunhe/nezha-cn-base""" ) a__ : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] ) a__ : Dict = torch.tensor([[0, 1, 1, 1, 1, 1]] ) with torch.no_grad(): a__ : int = model(__lowercase , attention_mask=__lowercase )[0] a__ : List[Any] = torch.Size((1, 6, 7_6_8) ) self.assertEqual(output.shape , __lowercase ) a__ : Dict = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowercase , atol=1E-4 ) ) @slow def SCREAMING_SNAKE_CASE__( self ) -> str: """simple docstring""" a__ : List[Any] = NezhaForMaskedLM.from_pretrained("""sijunhe/nezha-cn-base""" ) a__ : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] ) a__ : Any = torch.tensor([[1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): a__ : List[str] = model(__lowercase , attention_mask=__lowercase )[0] a__ : Any = torch.Size((1, 6, 2_1_1_2_8) ) self.assertEqual(output.shape , __lowercase ) a__ : Dict = torch.tensor( [[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowercase , atol=1E-4 ) )
266
import numpy as np import torch from torch.utils.data import Dataset from utils import logger class snake_case__ (A__ ): """simple docstring""" def __init__( self , __lowercase , __lowercase ) -> int: """simple docstring""" a__ : Tuple = params a__ : str = np.array(__lowercase ) a__ : List[Any] = np.array([len(__lowercase ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self , __lowercase ) -> Any: """simple docstring""" return (self.token_ids[index], self.lengths[index]) def __len__( self ) -> Dict: """simple docstring""" return len(self.lengths ) def SCREAMING_SNAKE_CASE__( self ) -> Optional[int]: """simple docstring""" assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def SCREAMING_SNAKE_CASE__( self ) -> Tuple: """simple docstring""" a__ : int = self.params.max_model_input_size a__ : int = self.lengths > max_len logger.info(F'''Splitting {sum(__lowercase )} too long sequences.''' ) def divide_chunks(__lowercase , __lowercase ): return [l[i : i + n] for i in range(0 , len(__lowercase ) , __lowercase )] a__ : Any = [] a__ : Optional[int] = [] if self.params.mlm: a__ , a__ : Any = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""] else: a__ , a__ : Dict = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: a__ : int = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: a__ : str = np.insert(__lowercase , 0 , __lowercase ) if sub_s[-1] != sep_id: a__ : List[str] = np.insert(__lowercase , len(__lowercase ) , __lowercase ) assert len(__lowercase ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(__lowercase ) new_tok_ids.extend(__lowercase ) new_lengths.extend([len(__lowercase ) for l in sub_seqs] ) a__ : Optional[int] = np.array(__lowercase ) a__ : Any = np.array(__lowercase ) def SCREAMING_SNAKE_CASE__( self ) -> Optional[Any]: """simple docstring""" a__ : Union[str, Any] = len(self ) a__ : List[str] = self.lengths > 1_1 a__ : Dict = self.token_ids[indices] a__ : List[str] = self.lengths[indices] a__ : int = len(self ) logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' ) def SCREAMING_SNAKE_CASE__( self ) -> List[Any]: """simple docstring""" if "unk_token" not in self.params.special_tok_ids: return else: a__ : Union[str, Any] = self.params.special_tok_ids["""unk_token"""] a__ : List[Any] = len(self ) a__ : Optional[int] = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) a__ : Optional[Any] = (unk_occs / self.lengths) < 0.5 a__ : Tuple = self.token_ids[indices] a__ : Union[str, Any] = self.lengths[indices] a__ : Tuple = len(self ) logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' ) def SCREAMING_SNAKE_CASE__( self ) -> str: """simple docstring""" if not self.params.is_master: return logger.info(F'''{len(self )} sequences''' ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # 
nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> Optional[int]: """simple docstring""" a__ : Optional[int] = [t[0] for t in batch] a__ : Any = [t[1] for t in batch] assert len(__lowercase ) == len(__lowercase ) # Max for paddings a__ : List[Any] = max(__lowercase ) # Pad token ids if self.params.mlm: a__ : int = self.params.special_tok_ids["""pad_token"""] else: a__ : List[str] = self.params.special_tok_ids["""unk_token"""] a__ : int = [list(t.astype(__lowercase ) ) + [pad_idx] * (max_seq_len_ - len(__lowercase )) for t in token_ids] assert len(tk_ ) == len(__lowercase ) assert all(len(__lowercase ) == max_seq_len_ for t in tk_ ) a__ : List[Any] = torch.tensor(tk_ ) # (bs, max_seq_len_) a__ : Optional[int] = torch.tensor(__lowercase ) # (bs) return tk_t, lg_t
266
1
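The dataset class in this row trims over-long sequences with a divide_chunks helper and re-attaches the cls/sep ids at each chunk boundary. The idiom in isolation, with toy ids (0 and 1 stand in for cls and sep):

import numpy as np

def divide_chunks(l, n):
    return [l[i : i + n] for i in range(0, len(l), n)]

cls_id, sep_id, max_len = 0, 1, 6
seq = np.array([0, 7, 8, 9, 10, 11, 12, 13, 1])   # cls ... sep, length 9 > max_len

sub_seqs = []
for sub_s in divide_chunks(seq, max_len - 2):      # leave room for cls/sep
    if sub_s[0] != cls_id:
        sub_s = np.insert(sub_s, 0, cls_id)
    if sub_s[-1] != sep_id:
        sub_s = np.insert(sub_s, len(sub_s), sep_id)
    sub_seqs.append(sub_s)

print([s.tolist() for s in sub_seqs])
# [[0, 7, 8, 9, 1], [0, 10, 11, 12, 13, 1], [0, 1]]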
'''simple docstring''' MOD_ADLER : Union[str, Any] =6_55_21 def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" a = 1 b = 0 for plain_chr in lowerCAmelCase: a = (a + ord(plain_chr )) % MOD_ADLER b = (b + a) % MOD_ADLER return (b << 16) | a
70
import fire from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer def save_randomly_initialized_version( config_name : str , save_dir : str , **config_kwargs ): '''simple docstring''' config = AutoConfig.from_pretrained(config_name , **config_kwargs ) model = AutoModelForSeq2SeqLM.from_config(config ) model.save_pretrained(save_dir ) AutoTokenizer.from_pretrained(config_name ).save_pretrained(save_dir ) return model if __name__ == "__main__": fire.Fire(save_randomly_initialized_version)
252
0
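The checksum in this row is Adler-32, so it can be cross-checked against the standard library's reference implementation (the call below uses the function name exactly as defined in the snippet, and feeds a str so the ord() loop applies):

import zlib

text = "Wikipedia"
custom = UpperCamelCase__(text)                   # function name as in the snippet above
reference = zlib.adler32(text.encode("utf-8"))    # zlib also starts from a=1, b=0
assert custom == reference
print(hex(custom))                                # 0x11e60398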
from ...configuration_utils import PretrainedConfig from ...utils import logging _A : Any = logging.get_logger(__name__) _A : Dict = { 'tanreinama/GPTSAN-2.8B-spout_is_uniform': ( 'https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json' ), } class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): _UpperCAmelCase : Optional[int] = "gptsan-japanese" _UpperCAmelCase : Union[str, Any] = [ "past_key_values", ] _UpperCAmelCase : List[Any] = { "hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self : List[str] , A : Dict=3_6_0_0_0 , A : List[Any]=1_2_8_0 , A : Tuple=1_0_2_4 , A : int=8_1_9_2 , A : Optional[int]=4_0_9_6 , A : Union[str, Any]=1_2_8 , A : Optional[Any]=1_0 , A : Optional[Any]=0 , A : Optional[Any]=1_6 , A : Tuple=1_6 , A : Dict=1_2_8 , A : Optional[Any]=0.0 , A : int=1e-5 , A : int=False , A : Optional[Any]=0.0 , A : Dict="float32" , A : Union[str, Any]=False , A : Optional[Any]=False , A : str=False , A : Tuple=0.0_02 , A : Tuple=False , A : Optional[int]=True , A : Optional[Any]=3_5_9_9_8 , A : Any=3_5_9_9_5 , A : Union[str, Any]=3_5_9_9_9 , **A : Optional[int] , ) ->Optional[int]: lowerCamelCase__ : Tuple = vocab_size lowerCamelCase__ : List[Any] = max_position_embeddings lowerCamelCase__ : Union[str, Any] = d_model lowerCamelCase__ : Any = d_ff lowerCamelCase__ : str = d_ext lowerCamelCase__ : Tuple = d_spout lowerCamelCase__ : Dict = num_switch_layers lowerCamelCase__ : Tuple = num_ext_layers lowerCamelCase__ : List[Any] = num_switch_layers + num_ext_layers lowerCamelCase__ : List[Any] = num_heads lowerCamelCase__ : str = num_experts lowerCamelCase__ : List[str] = expert_capacity lowerCamelCase__ : Optional[Any] = dropout_rate lowerCamelCase__ : Optional[int] = layer_norm_epsilon lowerCamelCase__ : Optional[int] = router_bias lowerCamelCase__ : Any = router_jitter_noise lowerCamelCase__ : int = router_dtype lowerCamelCase__ : Optional[Any] = router_ignore_padding_tokens lowerCamelCase__ : List[Any] = output_hidden_states lowerCamelCase__ : Union[str, Any] = output_attentions lowerCamelCase__ : Dict = initializer_factor lowerCamelCase__ : Union[str, Any] = output_router_logits lowerCamelCase__ : Union[str, Any] = use_cache super().__init__( separator_token_id=A , pad_token_id=A , eos_token_id=A , **A , )
265
def _a ( UpperCAmelCase ) -> int: """simple docstring""" if not isinstance(UpperCAmelCase , int ): raise TypeError('''only integers accepted as input''' ) else: num_str : Any = str(abs(UpperCAmelCase ) ) num_transpositions : Union[str, Any] = [list(num_str ) for char in range(len(num_str ) )] for index in range(len(num_str ) ): num_transpositions[index].pop(index ) return max( int(''''''.join(list(transposition ) ) ) for transposition in num_transpositions ) if __name__ == "__main__": __import__('doctest').testmod()
265
1
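A quick worked case for the digit-removal function in this row: for 2736 the candidates after deleting one digit are 736, 236, 276 and 273, so the maximum is 736 (_a is the function name as defined in the snippet):

print(_a(2736))   # 736 -- dropping the leading 2 wins
print(_a(-905))   # 95  -- abs() is applied first, then the 0 is dropped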
'''simple docstring''' import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() _A : List[Any] =logging.get_logger(__name__) _A : Dict =['''model.decoder.embed_positions.weights'''] def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> str: if "emb" in name: lowerCamelCase__ : Dict = name.replace("""emb""" , """model.decoder.embed_tokens""" ) if "transformer" in name: lowerCamelCase__ : List[str] = name.replace("""transformer""" , """model.decoder""" ) if "cross_attention" in name: lowerCamelCase__ : List[str] = name.replace("""cross_attention""" , """encoder_attn""" ) if "linear1" in name: lowerCamelCase__ : Optional[int] = name.replace("""linear1""" , """fc1""" ) if "linear2" in name: lowerCamelCase__ : Union[str, Any] = name.replace("""linear2""" , """fc2""" ) if "norm1" in name: lowerCamelCase__ : Dict = name.replace("""norm1""" , """self_attn_layer_norm""" ) if "norm_cross" in name: lowerCamelCase__ : Optional[Any] = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" ) if "norm2" in name: lowerCamelCase__ : Dict = name.replace("""norm2""" , """final_layer_norm""" ) if "out_norm" in name: lowerCamelCase__ : Optional[Any] = name.replace("""out_norm""" , """model.decoder.layer_norm""" ) if "linears" in name: lowerCamelCase__ : Optional[Any] = name.replace("""linears""" , """lm_heads""" ) if "condition_provider.conditioners.description.output_proj" in name: lowerCamelCase__ : int = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" ) return name def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Tuple[Dict, Dict]: lowerCamelCase__ : int = list(state_dict.keys() ) lowerCamelCase__ : Tuple = {} for key in keys: lowerCamelCase__ : Any = state_dict.pop(UpperCamelCase ) lowerCamelCase__ : Union[str, Any] = rename_keys(UpperCamelCase ) if "in_proj_weight" in key: # split fused qkv proj lowerCamelCase__ : Union[str, Any] = val[:hidden_size, :] lowerCamelCase__ : Any = val[hidden_size : 2 * hidden_size, :] lowerCamelCase__ : Optional[int] = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: lowerCamelCase__ : str = val else: lowerCamelCase__ : Union[str, Any] = val return state_dict, enc_dec_proj_state_dict def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> MusicgenDecoderConfig: if checkpoint == "small": # default config values lowerCamelCase__ : int = 1024 lowerCamelCase__ : int = 24 lowerCamelCase__ : List[Any] = 16 elif checkpoint == "medium": lowerCamelCase__ : Any = 1536 lowerCamelCase__ : Union[str, Any] = 48 lowerCamelCase__ : Optional[int] = 24 elif checkpoint == "large": lowerCamelCase__ : Optional[Any] = 2048 lowerCamelCase__ : Dict = 48 lowerCamelCase__ : List[Any] = 32 else: raise ValueError(f'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' ) lowerCamelCase__ : Any = MusicgenDecoderConfig( hidden_size=UpperCamelCase , ffn_dim=hidden_size * 4 , num_hidden_layers=UpperCamelCase , num_attention_heads=UpperCamelCase , ) return config @torch.no_grad() def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase="cpu" ) -> Optional[Any]: lowerCamelCase__ 
: Optional[int] = MusicGen.get_pretrained(UpperCamelCase , device=UpperCamelCase ) lowerCamelCase__ : List[Any] = decoder_config_from_checkpoint(UpperCamelCase ) lowerCamelCase__ : Any = fairseq_model.lm.state_dict() lowerCamelCase__ , lowerCamelCase__ : Optional[int] = rename_state_dict( UpperCamelCase , hidden_size=decoder_config.hidden_size ) lowerCamelCase__ : str = TaEncoderModel.from_pretrained("""t5-base""" ) lowerCamelCase__ : Tuple = EncodecModel.from_pretrained("""facebook/encodec_32khz""" ) lowerCamelCase__ : Optional[int] = MusicgenForCausalLM(UpperCamelCase ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection lowerCamelCase__ , lowerCamelCase__ : List[str] = decoder.load_state_dict(UpperCamelCase , strict=UpperCamelCase ) for key in missing_keys.copy(): if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(UpperCamelCase ) if len(UpperCamelCase ) > 0: raise ValueError(f'''Missing key(s) in state_dict: {missing_keys}''' ) if len(UpperCamelCase ) > 0: raise ValueError(f'''Unexpected key(s) in state_dict: {unexpected_keys}''' ) # init the composite model lowerCamelCase__ : Optional[Any] = MusicgenForConditionalGeneration(text_encoder=UpperCamelCase , audio_encoder=UpperCamelCase , decoder=UpperCamelCase ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(UpperCamelCase ) # check we can do a forward pass lowerCamelCase__ : Dict = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) lowerCamelCase__ : Optional[Any] = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): lowerCamelCase__ : Union[str, Any] = model(input_ids=UpperCamelCase , decoder_input_ids=UpperCamelCase ).logits if logits.shape != (8, 1, 2048): raise ValueError("""Incorrect shape for logits""" ) # now construct the processor lowerCamelCase__ : str = AutoTokenizer.from_pretrained("""t5-base""" ) lowerCamelCase__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" ) lowerCamelCase__ : Optional[int] = MusicgenProcessor(feature_extractor=UpperCamelCase , tokenizer=UpperCamelCase ) # set the appropriate bos/pad token ids lowerCamelCase__ : Union[str, Any] = 2048 lowerCamelCase__ : List[str] = 2048 # set other default generation config params lowerCamelCase__ : Optional[Any] = int(30 * audio_encoder.config.frame_rate ) lowerCamelCase__ : Union[str, Any] = True lowerCamelCase__ : List[Any] = 3.0 if pytorch_dump_folder is not None: Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase ) logger.info(f'''Saving model {checkpoint} to {pytorch_dump_folder}''' ) model.save_pretrained(UpperCamelCase ) processor.save_pretrained(UpperCamelCase ) if repo_id: logger.info(f'''Pushing model {checkpoint} to {repo_id}''' ) model.push_to_hub(UpperCamelCase ) processor.push_to_hub(UpperCamelCase ) if __name__ == "__main__": _A : Dict =argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint''', default='''small''', type=str, help='''Checkpoint size of the MusicGen model you\'d like to convert. 
Can be one of: `[\'small\', \'medium\', \'large\']`.''', ) parser.add_argument( '''--pytorch_dump_folder''', required=True, default=None, type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) parser.add_argument( '''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.''' ) _A : List[str] =parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
41
'''simple docstring''' # This script creates a super tiny model that is useful inside tests, when we just want to test that # the machinery works, without needing to the check the quality of the outcomes. # # This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny - # all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and # emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files. # The latter is done by `fsmt-make-super-tiny-model.py`. # # It will be used then as "stas/tiny-wmt19-en-ru" from pathlib import Path import json import tempfile from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES _A : Dict ='''tiny-wmt19-en-ru''' # Build # borrowed from a test _A : List[str] =[ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''w</w>''', '''r</w>''', '''t</w>''', '''lo''', '''low''', '''er</w>''', '''low</w>''', '''lowest</w>''', '''newer</w>''', '''wider</w>''', '''<unk>''', ] _A : str =dict(zip(vocab, range(len(vocab)))) _A : List[str] =['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', ''''''] with tempfile.TemporaryDirectory() as tmpdirname: _A : Union[str, Any] =Path(tmpdirname) _A : str =build_dir / VOCAB_FILES_NAMES['''src_vocab_file'''] _A : int =build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file'''] _A : List[Any] =build_dir / VOCAB_FILES_NAMES['''merges_file'''] with open(src_vocab_file, '''w''') as fp: fp.write(json.dumps(vocab_tokens)) with open(tgt_vocab_file, '''w''') as fp: fp.write(json.dumps(vocab_tokens)) with open(merges_file, '''w''') as fp: fp.write('''\n'''.join(merges)) _A : int =FSMTTokenizer( langs=['''en''', '''ru'''], src_vocab_size=len(vocab), tgt_vocab_size=len(vocab), src_vocab_file=src_vocab_file, tgt_vocab_file=tgt_vocab_file, merges_file=merges_file, ) _A : List[str] =FSMTConfig( langs=['''ru''', '''en'''], src_vocab_size=1_000, tgt_vocab_size=1_000, d_model=4, encoder_layers=1, decoder_layers=1, encoder_ffn_dim=4, decoder_ffn_dim=4, encoder_attention_heads=1, decoder_attention_heads=1, ) _A : Union[str, Any] =FSMTForConditionalGeneration(config) print(F'num of params {tiny_model.num_parameters()}') # Test _A : List[str] =tokenizer(['''Making tiny model'''], return_tensors='''pt''') _A : Tuple =tiny_model(**batch) print('''test output:''', len(outputs.logits[0])) # Save tiny_model.half() # makes it smaller tiny_model.save_pretrained(mname_tiny) tokenizer.save_pretrained(mname_tiny) print(F'Generated {mname_tiny}') # Upload # transformers-cli upload tiny-wmt19-en-ru
'''simple docstring''' import bza import gzip import lzma import os import shutil import struct import tarfile import warnings import zipfile from abc import ABC, abstractmethod from pathlib import Path from typing import Dict, List, Optional, Type, Union from .. import config from .filelock import FileLock from .logging import get_logger lowercase__ : List[str] = get_logger(__name__) class __lowerCAmelCase : """simple docstring""" def __init__( self : Union[str, Any] , lowerCAmelCase__ : Optional[str] = None ) -> Any: '''simple docstring''' _UpperCamelCase = ( os.path.join(lowerCAmelCase__ , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH ) _UpperCamelCase = Extractor def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : str ) -> str: '''simple docstring''' from .file_utils import hash_url_to_filename # Path where we extract compressed archives # We extract in the cache dir, and get the extracted path name by hashing the original path" _UpperCamelCase = os.path.abspath(lowerCAmelCase__ ) return os.path.join(self.extract_dir , hash_url_to_filename(lowerCAmelCase__ ) ) def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : bool ) -> bool: '''simple docstring''' return force_extract or ( not os.path.isfile(lowerCAmelCase__ ) and not (os.path.isdir(lowerCAmelCase__ ) and os.listdir(lowerCAmelCase__ )) ) def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : bool = False ) -> str: '''simple docstring''' _UpperCamelCase = self.extractor.infer_extractor_format(lowerCAmelCase__ ) if not extractor_format: return input_path _UpperCamelCase = self._get_output_path(lowerCAmelCase__ ) if self._do_extract(lowerCAmelCase__ , lowerCAmelCase__ ): self.extractor.extract(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) return output_path class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" @classmethod @abstractmethod def snake_case__ ( cls : List[Any] , lowerCAmelCase__ : Union[Path, str] , **lowerCAmelCase__ : Union[str, Any] ) -> bool: '''simple docstring''' ... @staticmethod @abstractmethod def snake_case__ ( lowerCAmelCase__ : Union[Path, str] , lowerCAmelCase__ : Union[Path, str] ) -> None: '''simple docstring''' ... 
class __lowerCAmelCase ( __magic_name__ , __magic_name__ ): """simple docstring""" _snake_case : List[bytes] = [] @staticmethod def snake_case__ ( lowerCAmelCase__ : Union[Path, str] , lowerCAmelCase__ : int ) -> List[str]: '''simple docstring''' with open(lowerCAmelCase__ , '''rb''' ) as f: return f.read(lowerCAmelCase__ ) @classmethod def snake_case__ ( cls : List[str] , lowerCAmelCase__ : Union[Path, str] , lowerCAmelCase__ : bytes = b"" ) -> bool: '''simple docstring''' if not magic_number: _UpperCamelCase = max(len(lowerCAmelCase__ ) for cls_magic_number in cls.magic_numbers ) try: _UpperCamelCase = cls.read_magic_number(lowerCAmelCase__ , lowerCAmelCase__ ) except OSError: return False return any(magic_number.startswith(lowerCAmelCase__ ) for cls_magic_number in cls.magic_numbers ) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" @classmethod def snake_case__ ( cls : Any , lowerCAmelCase__ : Union[Path, str] , **lowerCAmelCase__ : Dict ) -> bool: '''simple docstring''' return tarfile.is_tarfile(lowerCAmelCase__ ) @staticmethod def snake_case__ ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int] ) -> str: '''simple docstring''' def resolved(lowerCAmelCase__ : str ) -> str: return os.path.realpath(os.path.abspath(lowerCAmelCase__ ) ) def badpath(lowerCAmelCase__ : str , lowerCAmelCase__ : str ) -> bool: # joinpath will ignore base if path is absolute return not resolved(os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) ).startswith(lowerCAmelCase__ ) def badlink(lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str ) -> bool: # Links are interpreted relative to the directory containing the link _UpperCamelCase = resolved(os.path.join(lowerCAmelCase__ , os.path.dirname(info.name ) ) ) return badpath(info.linkname , base=lowerCAmelCase__ ) _UpperCamelCase = resolved(lowerCAmelCase__ ) for finfo in members: if badpath(finfo.name , lowerCAmelCase__ ): logger.error(f"""Extraction of {finfo.name} is blocked (illegal path)""" ) elif finfo.issym() and badlink(lowerCAmelCase__ , lowerCAmelCase__ ): logger.error(f"""Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}""" ) elif finfo.islnk() and badlink(lowerCAmelCase__ , lowerCAmelCase__ ): logger.error(f"""Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}""" ) else: yield finfo @staticmethod def snake_case__ ( lowerCAmelCase__ : Union[Path, str] , lowerCAmelCase__ : Union[Path, str] ) -> None: '''simple docstring''' os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ ) _UpperCamelCase = tarfile.open(lowerCAmelCase__ ) tar_file.extractall(lowerCAmelCase__ , members=TarExtractor.safemembers(lowerCAmelCase__ , lowerCAmelCase__ ) ) tar_file.close() class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : List[str] = [b'\x1F\x8B'] @staticmethod def snake_case__ ( lowerCAmelCase__ : Union[Path, str] , lowerCAmelCase__ : Union[Path, str] ) -> None: '''simple docstring''' with gzip.open(lowerCAmelCase__ , '''rb''' ) as gzip_file: with open(lowerCAmelCase__ , '''wb''' ) as extracted_file: shutil.copyfileobj(lowerCAmelCase__ , lowerCAmelCase__ ) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : Dict = [ b'PK\x03\x04', b'PK\x05\x06', # empty archive b'PK\x07\x08', # spanned archive ] @classmethod def snake_case__ ( cls : str , lowerCAmelCase__ : Union[Path, str] , lowerCAmelCase__ : bytes = b"" ) -> bool: '''simple docstring''' if super().is_extractable(lowerCAmelCase__ , magic_number=lowerCAmelCase__ ): return True try: # Alternative version 
of zipfile.is_zipfile that has less false positives, but misses executable zip archives. # From: https://github.com/python/cpython/pull/5053 from zipfile import ( _CD_SIGNATURE, _ECD_DISK_NUMBER, _ECD_DISK_START, _ECD_ENTRIES_TOTAL, _ECD_OFFSET, _ECD_SIZE, _EndRecData, sizeCentralDir, stringCentralDir, structCentralDir, ) with open(lowerCAmelCase__ , '''rb''' ) as fp: _UpperCamelCase = _EndRecData(lowerCAmelCase__ ) if endrec: if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0: return True # Empty zipfiles are still zipfiles elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]: fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir: _UpperCamelCase = fp.read(lowerCAmelCase__ ) # CD is where we expect it to be if len(lowerCAmelCase__ ) == sizeCentralDir: _UpperCamelCase = struct.unpack(lowerCAmelCase__ , lowerCAmelCase__ ) # CD is the right size if centdir[_CD_SIGNATURE] == stringCentralDir: return True # First central directory entry has correct magic number return False except Exception: # catch all errors in case future python versions change the zipfile internals return False @staticmethod def snake_case__ ( lowerCAmelCase__ : Union[Path, str] , lowerCAmelCase__ : Union[Path, str] ) -> None: '''simple docstring''' os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ ) with zipfile.ZipFile(lowerCAmelCase__ , '''r''' ) as zip_file: zip_file.extractall(lowerCAmelCase__ ) zip_file.close() class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : Optional[Any] = [b'\xFD\x37\x7A\x58\x5A\x00'] @staticmethod def snake_case__ ( lowerCAmelCase__ : Union[Path, str] , lowerCAmelCase__ : Union[Path, str] ) -> None: '''simple docstring''' with lzma.open(lowerCAmelCase__ ) as compressed_file: with open(lowerCAmelCase__ , '''wb''' ) as extracted_file: shutil.copyfileobj(lowerCAmelCase__ , lowerCAmelCase__ ) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : Any = [b'Rar!\x1a\x07\x00', b'Rar!\x1a\x07\x01\x00'] # RAR_ID # RAR5_ID @staticmethod def snake_case__ ( lowerCAmelCase__ : Union[Path, str] , lowerCAmelCase__ : Union[Path, str] ) -> None: '''simple docstring''' if not config.RARFILE_AVAILABLE: raise ImportError('''Please pip install rarfile''' ) import rarfile os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ ) _UpperCamelCase = rarfile.RarFile(lowerCAmelCase__ ) rf.extractall(lowerCAmelCase__ ) rf.close() class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : Optional[Any] = [b'\x28\xb5\x2F\xFD'] @staticmethod def snake_case__ ( lowerCAmelCase__ : Union[Path, str] , lowerCAmelCase__ : Union[Path, str] ) -> None: '''simple docstring''' if not config.ZSTANDARD_AVAILABLE: raise ImportError('''Please pip install zstandard''' ) import zstandard as zstd _UpperCamelCase = zstd.ZstdDecompressor() with open(lowerCAmelCase__ , '''rb''' ) as ifh, open(lowerCAmelCase__ , '''wb''' ) as ofh: dctx.copy_stream(lowerCAmelCase__ , lowerCAmelCase__ ) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : Union[str, Any] = [b'\x42\x5A\x68'] @staticmethod def snake_case__ ( lowerCAmelCase__ : Union[Path, str] , lowerCAmelCase__ : Union[Path, str] ) -> None: '''simple docstring''' with bza.open(lowerCAmelCase__ , '''rb''' ) as compressed_file: with open(lowerCAmelCase__ , '''wb''' ) as extracted_file: shutil.copyfileobj(lowerCAmelCase__ , lowerCAmelCase__ ) class 
__lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : Dict = [b'\x37\x7A\xBC\xAF\x27\x1C'] @staticmethod def snake_case__ ( lowerCAmelCase__ : Union[Path, str] , lowerCAmelCase__ : Union[Path, str] ) -> None: '''simple docstring''' if not config.PY7ZR_AVAILABLE: raise ImportError('''Please pip install py7zr''' ) import pyazr os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ ) with pyazr.SevenZipFile(lowerCAmelCase__ , '''r''' ) as archive: archive.extractall(lowerCAmelCase__ ) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : Tuple = [b'\x04\x22\x4D\x18'] @staticmethod def snake_case__ ( lowerCAmelCase__ : Union[Path, str] , lowerCAmelCase__ : Union[Path, str] ) -> None: '''simple docstring''' if not config.LZ4_AVAILABLE: raise ImportError('''Please pip install lz4''' ) import lza.frame with lza.frame.open(lowerCAmelCase__ , '''rb''' ) as compressed_file: with open(lowerCAmelCase__ , '''wb''' ) as extracted_file: shutil.copyfileobj(lowerCAmelCase__ , lowerCAmelCase__ ) class __lowerCAmelCase : """simple docstring""" # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip) _snake_case : Dict[str, Type[BaseExtractor]] = { "tar": TarExtractor, "gzip": GzipExtractor, "zip": ZipExtractor, "xz": XzExtractor, "rar": RarExtractor, "zstd": ZstdExtractor, "bz2": BzipaExtractor, "7z": SevenZipExtractor, # <Added version="2.4.0"/> "lz4": LzaExtractor, # <Added version="2.4.0"/> } @classmethod def snake_case__ ( cls : Union[str, Any] ) -> List[Any]: '''simple docstring''' return max( len(lowerCAmelCase__ ) for extractor in cls.extractors.values() if issubclass(lowerCAmelCase__ , lowerCAmelCase__ ) for extractor_magic_number in extractor.magic_numbers ) @staticmethod def snake_case__ ( lowerCAmelCase__ : Union[Path, str] , lowerCAmelCase__ : int ) -> Dict: '''simple docstring''' try: return MagicNumberBaseExtractor.read_magic_number(lowerCAmelCase__ , magic_number_length=lowerCAmelCase__ ) except OSError: return b"" @classmethod def snake_case__ ( cls : Union[str, Any] , lowerCAmelCase__ : Union[Path, str] , lowerCAmelCase__ : bool = False ) -> bool: '''simple docstring''' warnings.warn( '''Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. 
''' '''Use \'infer_extractor_format\' instead.''' , category=lowerCAmelCase__ , ) _UpperCamelCase = cls.infer_extractor_format(lowerCAmelCase__ ) if extractor_format: return True if not return_extractor else (True, cls.extractors[extractor_format]) return False if not return_extractor else (False, None) @classmethod def snake_case__ ( cls : Tuple , lowerCAmelCase__ : Union[Path, str] ) -> str: # <Added version="2.4.0"/> '''simple docstring''' _UpperCamelCase = cls._get_magic_number_max_length() _UpperCamelCase = cls._read_magic_number(lowerCAmelCase__ , lowerCAmelCase__ ) for extractor_format, extractor in cls.extractors.items(): if extractor.is_extractable(lowerCAmelCase__ , magic_number=lowerCAmelCase__ ): return extractor_format @classmethod def snake_case__ ( cls : Optional[int] , lowerCAmelCase__ : Union[Path, str] , lowerCAmelCase__ : Union[Path, str] , lowerCAmelCase__ : Optional[str] = None , lowerCAmelCase__ : Optional[BaseExtractor] = "deprecated" , ) -> None: '''simple docstring''' os.makedirs(os.path.dirname(lowerCAmelCase__ ) , exist_ok=lowerCAmelCase__ ) # Prevent parallel extractions _UpperCamelCase = str(Path(lowerCAmelCase__ ).with_suffix('''.lock''' ) ) with FileLock(lowerCAmelCase__ ): shutil.rmtree(lowerCAmelCase__ , ignore_errors=lowerCAmelCase__ ) if extractor_format or extractor != "deprecated": if extractor != "deprecated" or not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): # passed as positional arg warnings.warn( '''Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. ''' '''Use \'extractor_format\' instead.''' , category=lowerCAmelCase__ , ) _UpperCamelCase = extractor if extractor != '''deprecated''' else extractor_format else: _UpperCamelCase = cls.extractors[extractor_format] return extractor.extract(lowerCAmelCase__ , lowerCAmelCase__ ) else: warnings.warn( '''Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an ''' '''exception in 3.0.0.''' , category=lowerCAmelCase__ , ) for extractor in cls.extractors.values(): if extractor.is_extractable(lowerCAmelCase__ ): return extractor.extract(lowerCAmelCase__ , lowerCAmelCase__ )
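# A minimal usage sketch for the extraction manager defined at the top of this
# module; `ExtractManager` is an assumed export name, and the paths are
# placeholders. The archive format is inferred from the file's magic number
# (or tarfile/zipfile probing), so the input needs no particular extension,
# and non-archives are returned unchanged.
#
#   manager = ExtractManager(cache_dir="/tmp/extracted")
#   output_path = manager.extract("/path/to/archive.tar.gz")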
'''simple docstring''' import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def snake_case__ ( self : Tuple ) -> int: '''simple docstring''' _UpperCamelCase = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ ) ) def snake_case__ ( self : int ) -> Tuple: '''simple docstring''' _UpperCamelCase = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ ) ) def snake_case__ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' _UpperCamelCase = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ ) ) def snake_case__ ( self : List[Any] ) -> List[Any]: '''simple docstring''' _UpperCamelCase = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ ) ) def snake_case__ ( self : Dict ) -> Dict: '''simple docstring''' _UpperCamelCase = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', # Removed: 'text_encoder/model.safetensors', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ ) ) def snake_case__ ( self : Any ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] _UpperCamelCase = '''fp16''' self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) ) def snake_case__ ( self : Optional[Any] ) -> Dict: '''simple docstring''' _UpperCamelCase = [ '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] _UpperCamelCase = '''fp16''' self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) ) def snake_case__ ( self : Tuple ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] _UpperCamelCase = '''fp16''' self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) ) def snake_case__ ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', 
'''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] _UpperCamelCase = '''fp16''' self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) ) def snake_case__ ( self : Optional[int] ) -> Dict: '''simple docstring''' _UpperCamelCase = [ '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', ] _UpperCamelCase = '''fp16''' self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) ) def snake_case__ ( self : Optional[Any] ) -> str: '''simple docstring''' _UpperCamelCase = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] _UpperCamelCase = '''fp16''' self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) ) def snake_case__ ( self : List[str] ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', # 'text_encoder/model.fp16.safetensors', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] _UpperCamelCase = '''fp16''' self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
import math

from numpy import inf
from scipy.integrate import quad


def gamma(num: float) -> float:
    """Compute Gamma(num) as the integral of x ** (num - 1) * exp(-x) over [0, inf)."""
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
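# Quick sanity check: Gamma(n) == (n - 1)! for positive integers, so
# round(gamma(5.0)) should give 24, since quad integrates
# x ** 4 * exp(-x) over [0, inf).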
'''simple docstring''' import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def _UpperCAmelCase ( ) -> Tuple: _lowerCAmelCase : List[Any] = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png""" _lowerCAmelCase : int = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ).convert("""RGB""" ) return image def _UpperCAmelCase ( _lowerCamelCase : Any ) -> Dict: _lowerCAmelCase : str = [] # fmt: off # vision encoder rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") ) rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") ) rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") ) rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") ) rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") ) rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') ) rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') ) rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') ) rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') ) rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') ) rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) ) rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') ) rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') ) rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') ) rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') ) rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') ) # QFormer rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") ) rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") ) # fmt: on return rename_keys def _UpperCAmelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[Any] ) -> Optional[Any]: _lowerCAmelCase : str = dct.pop(_lowerCamelCase ) _lowerCAmelCase : str = val def _UpperCAmelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple ) -> Tuple: for i in range(config.vision_config.num_hidden_layers ): # read in original q and 
v biases _lowerCAmelCase : Tuple = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias' ) _lowerCAmelCase : Optional[Any] = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias' ) # next, set bias in the state dict _lowerCAmelCase : int = torch.cat((q_bias, torch.zeros_like(_lowerCamelCase , requires_grad=_lowerCamelCase ), v_bias) ) _lowerCAmelCase : str = qkv_bias def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] ) -> List[Any]: _lowerCAmelCase : str = 3_64 if """coco""" in model_name else 2_24 _lowerCAmelCase : str = BlipaVisionConfig(image_size=_lowerCamelCase ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: _lowerCAmelCase : int = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=_lowerCamelCase ).to_dict() elif "opt-6.7b" in model_name: _lowerCAmelCase : Union[str, Any] = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=_lowerCamelCase ).to_dict() elif "t5-xl" in model_name: _lowerCAmelCase : Optional[int] = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: _lowerCAmelCase : str = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict() _lowerCAmelCase : Dict = BlipaConfig(vision_config=_lowerCamelCase , text_config=_lowerCamelCase ) return config, image_size @torch.no_grad() def _UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : List[Any]=None , _lowerCamelCase : int=False ) -> List[str]: _lowerCAmelCase : int = ( AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" ) if """opt""" in model_name else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" ) ) _lowerCAmelCase : List[Any] = tokenizer("""\n""" , add_special_tokens=_lowerCamelCase ).input_ids[0] _lowerCAmelCase , _lowerCAmelCase : List[str] = get_blipa_config(_lowerCamelCase , eos_token_id=_lowerCamelCase ) _lowerCAmelCase : Optional[int] = BlipaForConditionalGeneration(_lowerCamelCase ).eval() _lowerCAmelCase : Union[str, Any] = { """blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""), """blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""), """blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""), """blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""), """blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""), """blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""), """blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""), } _lowerCAmelCase , _lowerCAmelCase : List[str] = model_name_to_original[model_name] # load original model print("""Loading original model...""" ) _lowerCAmelCase : Dict = """cuda""" if torch.cuda.is_available() else """cpu""" _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = load_model_and_preprocess( name=_lowerCamelCase , model_type=_lowerCamelCase , is_eval=_lowerCamelCase , device=_lowerCamelCase ) original_model.eval() print("""Done!""" ) # update state dict keys _lowerCAmelCase : List[Any] = original_model.state_dict() _lowerCAmelCase : Optional[int] = create_rename_keys(_lowerCamelCase ) for src, dest in rename_keys: rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): _lowerCAmelCase : Tuple = state_dict.pop(_lowerCamelCase ) if 
key.startswith("""Qformer.bert""" ): _lowerCAmelCase : List[Any] = key.replace("""Qformer.bert""" , """qformer""" ) if "attention.self" in key: _lowerCAmelCase : Optional[int] = key.replace("""self""" , """attention""" ) if "opt_proj" in key: _lowerCAmelCase : Dict = key.replace("""opt_proj""" , """language_projection""" ) if "t5_proj" in key: _lowerCAmelCase : Tuple = key.replace("""t5_proj""" , """language_projection""" ) if key.startswith("""opt""" ): _lowerCAmelCase : List[Any] = key.replace("""opt""" , """language""" ) if key.startswith("""t5""" ): _lowerCAmelCase : int = key.replace("""t5""" , """language""" ) _lowerCAmelCase : Tuple = val # read in qv biases read_in_q_v_bias(_lowerCamelCase , _lowerCamelCase ) _lowerCAmelCase , _lowerCAmelCase : Optional[int] = hf_model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase ) assert len(_lowerCamelCase ) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] _lowerCAmelCase : Union[str, Any] = load_demo_image() _lowerCAmelCase : Optional[int] = vis_processors["""eval"""](_lowerCamelCase ).unsqueeze(0 ).to(_lowerCamelCase ) _lowerCAmelCase : List[str] = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(_lowerCamelCase ) # create processor _lowerCAmelCase : Optional[int] = BlipImageProcessor( size={"""height""": image_size, """width""": image_size} , image_mean=_lowerCamelCase , image_std=_lowerCamelCase ) _lowerCAmelCase : Tuple = BlipaProcessor(image_processor=_lowerCamelCase , tokenizer=_lowerCamelCase ) _lowerCAmelCase : Any = processor(images=_lowerCamelCase , return_tensors="""pt""" ).pixel_values.to(_lowerCamelCase ) # make sure processor creates exact same pixel values assert torch.allclose(_lowerCamelCase , _lowerCamelCase ) original_model.to(_lowerCamelCase ) hf_model.to(_lowerCamelCase ) with torch.no_grad(): if "opt" in model_name: _lowerCAmelCase : Optional[Any] = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits _lowerCAmelCase : Optional[Any] = hf_model(_lowerCamelCase , _lowerCamelCase ).logits else: _lowerCAmelCase : List[Any] = original_model( {"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits _lowerCAmelCase : Tuple = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 ) _lowerCAmelCase : Dict = hf_model(_lowerCamelCase , _lowerCamelCase , labels=_lowerCamelCase ).logits assert original_logits.shape == logits.shape print("""First values of original logits:""" , original_logits[0, :3, :3] ) print("""First values of HF logits:""" , logits[0, :3, :3] ) # assert values if model_name == "blip2-flan-t5-xl": _lowerCAmelCase : Any = torch.tensor( [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=_lowerCamelCase ) assert torch.allclose(logits[0, :3, :3] , _lowerCamelCase , atol=1e-4 ) elif model_name == "blip2-flan-t5-xl-coco": _lowerCAmelCase : List[Any] = torch.tensor( [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=_lowerCamelCase ) else: # cast to same type _lowerCAmelCase : Union[str, Any] = logits.dtype assert torch.allclose(original_logits.to(_lowerCamelCase ) , _lowerCamelCase , atol=1e-2 ) print("""Looks ok!""" ) print("""Generating a caption...""" ) _lowerCAmelCase : Optional[int] = """""" _lowerCAmelCase : Union[str, Any] = tokenizer(_lowerCamelCase , return_tensors="""pt""" ).input_ids.to(_lowerCamelCase ) _lowerCAmelCase : List[Any] = original_model.generate({"""image""": original_pixel_values} ) _lowerCAmelCase : Dict = 
hf_model.generate( _lowerCamelCase , _lowerCamelCase , do_sample=_lowerCamelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , ) print("""Original generation:""" , _lowerCamelCase ) _lowerCAmelCase : int = input_ids.shape[1] _lowerCAmelCase : str = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowerCamelCase ) _lowerCAmelCase : List[str] = [text.strip() for text in output_text] print("""HF generation:""" , _lowerCamelCase ) if pytorch_dump_folder_path is not None: processor.save_pretrained(_lowerCamelCase ) hf_model.save_pretrained(_lowerCamelCase ) if push_to_hub: processor.push_to_hub(f'nielsr/{model_name}' ) hf_model.push_to_hub(f'nielsr/{model_name}' ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() UpperCamelCase_ = [ """blip2-opt-2.7b""", """blip2-opt-6.7b""", """blip2-opt-2.7b-coco""", """blip2-opt-6.7b-coco""", """blip2-flan-t5-xl""", """blip2-flan-t5-xl-coco""", """blip2-flan-t5-xxl""", ] parser.add_argument( """--model_name""", default="""blip2-opt-2.7b""", choices=choices, type=str, help="""Path to hf config.json of model to convert""", ) parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model and processor to the hub after converting""", ) UpperCamelCase_ = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
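# Example CLI invocation (a sketch: the script file name and dump path are
# placeholders; model names must come from the `choices` list above):
#
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b \
#       --push_to_hub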
"""simple docstring""" import inspect import os import sys import unittest import accelerate from accelerate.test_utils import execute_subprocess_async, require_tpu class lowerCamelCase__ ( unittest.TestCase ): def _lowerCamelCase ( self : Tuple ): a__: List[Any] =inspect.getfile(accelerate.test_utils ) a__: Tuple =os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] ) a__: List[str] =os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] ) @require_tpu def _lowerCamelCase ( self : int ): a__: List[Any] =F"\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n ".split() a__: int =[sys.executable] + distributed_args execute_subprocess_async(lowerCAmelCase_ , env=os.environ.copy() )
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
'''simple docstring''' import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin a_ : Any = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class snake_case ( lowercase , unittest.TestCase ): """simple docstring""" _lowerCamelCase = XGLMTokenizer _lowerCamelCase = XGLMTokenizerFast _lowerCamelCase = True _lowerCamelCase = True def snake_case ( self ): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase_ = XGLMTokenizer(UpperCamelCase , keep_accents=UpperCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = "<pad>" lowerCamelCase_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(len(UpperCamelCase ) , 1008 ) def snake_case ( self ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1008 ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = XGLMTokenizer(UpperCamelCase , keep_accents=UpperCamelCase ) lowerCamelCase_ = tokenizer.tokenize("This is a test" ) self.assertListEqual(UpperCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) lowerCamelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( UpperCamelCase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) lowerCamelCase_ = tokenizer.convert_tokens_to_ids(UpperCamelCase ) self.assertListEqual( UpperCamelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) lowerCamelCase_ = tokenizer.convert_ids_to_tokens(UpperCamelCase ) self.assertListEqual( UpperCamelCase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) @cached_property def snake_case ( self ): """simple docstring""" return XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) def snake_case ( self ): """simple docstring""" with tempfile.NamedTemporaryFile() as f: shutil.copyfile(UpperCamelCase , f.name ) lowerCamelCase_ = XGLMTokenizer(f.name , keep_accents=UpperCamelCase ) lowerCamelCase_ = pickle.dumps(UpperCamelCase ) pickle.loads(UpperCamelCase ) def snake_case ( self ): """simple docstring""" if not self.test_rust_tokenizer: return lowerCamelCase_ = self.get_tokenizer() lowerCamelCase_ = self.get_rust_tokenizer() lowerCamelCase_ = "I was born in 92000, and this is falsé." lowerCamelCase_ = tokenizer.tokenize(UpperCamelCase ) lowerCamelCase_ = rust_tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowerCamelCase_ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) lowerCamelCase_ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowerCamelCase_ = self.get_rust_tokenizer() lowerCamelCase_ = tokenizer.encode(UpperCamelCase ) lowerCamelCase_ = rust_tokenizer.encode(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) @slow def snake_case ( self ): """simple docstring""" lowerCamelCase_ = "Hello World!" lowerCamelCase_ = [2, 3_1227, 4447, 35] self.assertListEqual(UpperCamelCase , self.big_tokenizer.encode(UpperCamelCase ) ) @slow def snake_case ( self ): """simple docstring""" lowerCamelCase_ = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will" " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth" ) # fmt: off lowerCamelCase_ = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735] # fmt: on self.assertListEqual(UpperCamelCase , self.big_tokenizer.encode(UpperCamelCase ) ) @slow def snake_case ( self ): """simple docstring""" # fmt: off lowerCamelCase_ = { "input_ids": [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase , model_name="facebook/xglm-564M" , padding=UpperCamelCase , )
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """A string can be rearranged into a palindrome iff at most one character
    has an odd count (ignoring spaces and case)."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """Same check as above, written without collections.Counter."""
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: stores the frequency of every character in the input string
    character_freq_dict: dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(check_str: str = "") -> None:
    print("\nFor string = ", check_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(check_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(check_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
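# Why the parity test works: in a palindrome every character pairs with its
# mirror image, so at most one character (the middle of an odd-length string)
# may occur an odd number of times. For example, Counter("racecar") has exactly
# one odd count ("e": 1), so it can be rearranged; "abc" has three odd counts
# and cannot.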
from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) __snake_case : List[str] =logging.get_logger(__name__) # pylint: disable=invalid-name __snake_case : Optional[int] ='\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n' def lowerCAmelCase__ ( lowerCamelCase_ : Optional[Any] ,lowerCamelCase_ : Dict ,lowerCamelCase_ : str=8): '''simple docstring''' lowerCAmelCase__ : Any = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 lowerCAmelCase__ : int = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class lowerCamelCase__ ( lowerCamelCase__): '''simple docstring''' def __init__(self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,) -> int: """simple docstring""" super().__init__() self.register_modules( unet=__lowerCamelCase ,scheduler=__lowerCamelCase ,movq=__lowerCamelCase ,) lowerCAmelCase__ : Tuple = 2 ** (len(self.movq.config.block_out_channels ) - 1) def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> Tuple: """simple docstring""" if latents is None: lowerCAmelCase__ : int = randn_tensor(__lowerCamelCase ,generator=__lowerCamelCase ,device=__lowerCamelCase ,dtype=__lowerCamelCase ) else: if latents.shape != shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" ) lowerCAmelCase__ : List[Any] = latents.to(__lowerCamelCase ) lowerCAmelCase__ : Optional[Any] = latents * scheduler.init_noise_sigma return latents def lowerCAmelCase__ (self ,__lowerCamelCase=0 ) -> Dict: """simple docstring""" if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('''Please install accelerate via `pip install accelerate`''' ) lowerCAmelCase__ : Tuple = torch.device(f"""cuda:{gpu_id}""" ) lowerCAmelCase__ : Optional[int] = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(__lowerCamelCase ,__lowerCamelCase ) def lowerCAmelCase__ (self ,__lowerCamelCase=0 ) -> Union[str, Any]: """simple docstring""" if is_accelerate_available() and is_accelerate_version('''>=''' ,'''0.17.0.dev0''' ): from accelerate import cpu_offload_with_hook else: raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' ) lowerCAmelCase__ : str = torch.device(f"""cuda:{gpu_id}""" ) if self.device.type != "cpu": self.to('''cpu''' ,silence_dtype_warnings=__lowerCamelCase ) torch.cuda.empty_cache() # 
otherwise we don't see the memory savings (but they probably exist) lowerCAmelCase__ : Union[str, Any] = None for cpu_offloaded_model in [self.unet, self.movq]: lowerCAmelCase__ , lowerCAmelCase__ : Tuple = cpu_offload_with_hook(__lowerCamelCase ,__lowerCamelCase ,prev_module_hook=__lowerCamelCase ) # We'll offload the last model manually. lowerCAmelCase__ : Tuple = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def lowerCAmelCase__ (self ) -> Dict: """simple docstring""" if not hasattr(self.unet ,'''_hf_hook''' ): return self.device for module in self.unet.modules(): if ( hasattr(__lowerCamelCase ,'''_hf_hook''' ) and hasattr(module._hf_hook ,'''execution_device''' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(__lowerCamelCase ) def __call__(self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = 5_12 ,__lowerCamelCase = 5_12 ,__lowerCamelCase = 1_00 ,__lowerCamelCase = 4.0 ,__lowerCamelCase = 1 ,__lowerCamelCase = None ,__lowerCamelCase = None ,__lowerCamelCase = "pil" ,__lowerCamelCase = True ,) -> Union[str, Any]: """simple docstring""" lowerCAmelCase__ : Optional[int] = self._execution_device lowerCAmelCase__ : int = guidance_scale > 1.0 if isinstance(__lowerCamelCase ,__lowerCamelCase ): lowerCAmelCase__ : Any = torch.cat(__lowerCamelCase ,dim=0 ) lowerCAmelCase__ : Optional[int] = image_embeds.shape[0] * num_images_per_prompt if isinstance(__lowerCamelCase ,__lowerCamelCase ): lowerCAmelCase__ : Optional[int] = torch.cat(__lowerCamelCase ,dim=0 ) if do_classifier_free_guidance: lowerCAmelCase__ : List[Any] = image_embeds.repeat_interleave(__lowerCamelCase ,dim=0 ) lowerCAmelCase__ : Tuple = negative_image_embeds.repeat_interleave(__lowerCamelCase ,dim=0 ) lowerCAmelCase__ : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(dtype=self.unet.dtype ,device=__lowerCamelCase ) self.scheduler.set_timesteps(__lowerCamelCase ,device=__lowerCamelCase ) lowerCAmelCase__ : List[str] = self.scheduler.timesteps lowerCAmelCase__ : int = self.unet.config.in_channels lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = downscale_height_and_width(__lowerCamelCase ,__lowerCamelCase ,self.movq_scale_factor ) # create initial latent lowerCAmelCase__ : Union[str, Any] = self.prepare_latents( (batch_size, num_channels_latents, height, width) ,image_embeds.dtype ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,self.scheduler ,) for i, t in enumerate(self.progress_bar(__lowerCamelCase ) ): # expand the latents if we are doing classifier free guidance lowerCAmelCase__ : List[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents lowerCAmelCase__ : Any = {'''image_embeds''': image_embeds} lowerCAmelCase__ : Any = self.unet( sample=__lowerCamelCase ,timestep=__lowerCamelCase ,encoder_hidden_states=__lowerCamelCase ,added_cond_kwargs=__lowerCamelCase ,return_dict=__lowerCamelCase ,)[0] if do_classifier_free_guidance: lowerCAmelCase__ , lowerCAmelCase__ : int = noise_pred.split(latents.shape[1] ,dim=1 ) lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = noise_pred.chunk(2 ) lowerCAmelCase__ , lowerCAmelCase__ : int = variance_pred.chunk(2 ) lowerCAmelCase__ : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) lowerCAmelCase__ : Tuple = torch.cat([noise_pred, variance_pred_text] ,dim=1 ) if not ( 
hasattr(self.scheduler.config ,'''variance_type''' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): lowerCAmelCase__ , lowerCAmelCase__ : int = noise_pred.split(latents.shape[1] ,dim=1 ) # compute the previous noisy sample x_t -> x_t-1 lowerCAmelCase__ : int = self.scheduler.step( __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,generator=__lowerCamelCase ,)[0] # post-processing lowerCAmelCase__ : Tuple = self.movq.decode(__lowerCamelCase ,force_not_quantize=__lowerCamelCase )['''sample'''] if output_type not in ["pt", "np", "pil"]: raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" ) if output_type in ["np", "pil"]: lowerCAmelCase__ : Union[str, Any] = image * 0.5 + 0.5 lowerCAmelCase__ : Optional[Any] = image.clamp(0 ,1 ) lowerCAmelCase__ : List[Any] = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy() if output_type == "pil": lowerCAmelCase__ : str = self.numpy_to_pil(__lowerCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=__lowerCamelCase )
def abbr(a: str, b: str) -> bool:
    """Return True if string `b` can be obtained from `a` by capitalizing some
    of a's lowercase letters and deleting all remaining lowercase letters."""
    n = len(a)
    m = len(b)
    # dp[i][j]: can the first i characters of `a` produce the first j characters of `b`?
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    # capitalize a[i] (or keep it, if already uppercase) to match b[j]
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    # delete the lowercase a[i]
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
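# Worked example: abbr("daBcd", "ABC") is True -- capitalize "a" and "c" and
# delete the two lowercase "d"s to obtain "ABC". abbr("dBcd", "ABC") is False,
# since no "a"/"A" is available to produce the leading "A".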
"""simple docstring""" from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def _snake_case ( _snake_case : Namespace ): return ConvertCommand( args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name ) snake_case__ : Tuple = ''' transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions. ''' class snake_case_( a__ ): @staticmethod def lowerCamelCase__ ( UpperCamelCase_ : ArgumentParser ): lowerCAmelCase : Dict = parser.add_parser( '''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , ) train_parser.add_argument('''--model_type''' , type=UpperCamelCase_ , required=UpperCamelCase_ , help='''Model\'s type.''' ) train_parser.add_argument( '''--tf_checkpoint''' , type=UpperCamelCase_ , required=UpperCamelCase_ , help='''TensorFlow checkpoint path or folder.''' ) train_parser.add_argument( '''--pytorch_dump_output''' , type=UpperCamelCase_ , required=UpperCamelCase_ , help='''Path to the PyTorch saved model output.''' ) train_parser.add_argument('''--config''' , type=UpperCamelCase_ , default='''''' , help='''Configuration file path or folder.''' ) train_parser.add_argument( '''--finetuning_task_name''' , type=UpperCamelCase_ , default=UpperCamelCase_ , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , ) train_parser.set_defaults(func=UpperCamelCase_ ) def __init__( self : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : str , *UpperCamelCase_ : Union[str, Any] , ): lowerCAmelCase : Union[str, Any] = logging.get_logger('''transformers-cli/converting''' ) self._logger.info(F'''Loading model {model_type}''' ) lowerCAmelCase : Optional[int] = model_type lowerCAmelCase : Optional[Any] = tf_checkpoint lowerCAmelCase : List[Any] = pytorch_dump_output lowerCAmelCase : Optional[int] = config lowerCAmelCase : Any = finetuning_task_name def lowerCamelCase__ ( self : Optional[int] ): if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(UpperCamelCase_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(UpperCamelCase_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(UpperCamelCase_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(UpperCamelCase_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "gpt": from 
..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( convert_openai_checkpoint_to_pytorch, ) convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "transfo_xl": try: from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(UpperCamelCase_ ) if "ckpt" in self._tf_checkpoint.lower(): lowerCAmelCase : Union[str, Any] = self._tf_checkpoint lowerCAmelCase : List[str] = '''''' else: lowerCAmelCase : Dict = self._tf_checkpoint lowerCAmelCase : Any = '''''' convert_transfo_xl_checkpoint_to_pytorch( UpperCamelCase_ , self._config , self._pytorch_dump_output , UpperCamelCase_ ) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(UpperCamelCase_ ) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(UpperCamelCase_ ) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name ) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) else: raise ValueError( '''--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]''' )
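# Example invocation through the transformers CLI (paths are placeholders;
# the flags match the parser registered above):
#
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint /path/to/bert_model.ckpt \
#       --config /path/to/bert_config.json \
#       --pytorch_dump_output /path/to/pytorch_model.bin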
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_fnet import FNetTokenizer else: snake_case__ : str = None snake_case__ : Optional[Any] = logging.get_logger(__name__) snake_case__ : Optional[int] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} snake_case__ : Dict = { '''vocab_file''': { '''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/spiece.model''', '''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/spiece.model''', }, '''tokenizer_file''': { '''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json''', '''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json''', }, } snake_case__ : Any = { '''google/fnet-base''': 512, '''google/fnet-large''': 512, } snake_case__ : Dict = '''▁''' class snake_case_( a__ ): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = ['''input_ids''', '''token_type_ids'''] __UpperCamelCase = FNetTokenizer def __init__( self : Union[str, Any] , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Any=False , UpperCamelCase_ : Any=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Tuple="<unk>" , UpperCamelCase_ : List[str]="[SEP]" , UpperCamelCase_ : List[Any]="<pad>" , UpperCamelCase_ : Union[str, Any]="[CLS]" , UpperCamelCase_ : int="[MASK]" , **UpperCamelCase_ : Optional[Any] , ): # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
lowerCAmelCase : int = ( AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ , normalized=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token ) super().__init__( UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , remove_space=UpperCamelCase_ , keep_accents=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , **UpperCamelCase_ , ) lowerCAmelCase : Optional[int] = do_lower_case lowerCAmelCase : str = remove_space lowerCAmelCase : Any = keep_accents lowerCAmelCase : int = vocab_file lowerCAmelCase : List[str] = False if not self.vocab_file else True def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): lowerCAmelCase : Optional[int] = [self.sep_token_id] lowerCAmelCase : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): lowerCAmelCase : List[str] = [self.sep_token_id] lowerCAmelCase : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ): if not os.path.isdir(UpperCamelCase_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCAmelCase : str = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ): copyfile(self.vocab_file , UpperCamelCase_ ) return (out_vocab_file,)
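# Usage sketch appended here, not from the original file (loading the
# pretrained tokenizer downloads files from the Hugging Face Hub):
from transformers import FNetTokenizerFast

tok = FNetTokenizerFast.from_pretrained("google/fnet-base")
enc = tok("first segment", "second segment")
print(enc["token_type_ids"])  # 0s over [CLS] + segment one + [SEP], then 1s over segment two + [SEP]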
60
1
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool UpperCAmelCase_ = { """Acehnese Arabic""": """ace_Arab""", """Acehnese Latin""": """ace_Latn""", """Mesopotamian Arabic""": """acm_Arab""", """Ta'izzi-Adeni Arabic""": """acq_Arab""", """Tunisian Arabic""": """aeb_Arab""", """Afrikaans""": """afr_Latn""", """South Levantine Arabic""": """ajp_Arab""", """Akan""": """aka_Latn""", """Amharic""": """amh_Ethi""", """North Levantine Arabic""": """apc_Arab""", """Modern Standard Arabic""": """arb_Arab""", """Modern Standard Arabic Romanized""": """arb_Latn""", """Najdi Arabic""": """ars_Arab""", """Moroccan Arabic""": """ary_Arab""", """Egyptian Arabic""": """arz_Arab""", """Assamese""": """asm_Beng""", """Asturian""": """ast_Latn""", """Awadhi""": """awa_Deva""", """Central Aymara""": """ayr_Latn""", """South Azerbaijani""": """azb_Arab""", """North Azerbaijani""": """azj_Latn""", """Bashkir""": """bak_Cyrl""", """Bambara""": """bam_Latn""", """Balinese""": """ban_Latn""", """Belarusian""": """bel_Cyrl""", """Bemba""": """bem_Latn""", """Bengali""": """ben_Beng""", """Bhojpuri""": """bho_Deva""", """Banjar Arabic""": """bjn_Arab""", """Banjar Latin""": """bjn_Latn""", """Standard Tibetan""": """bod_Tibt""", """Bosnian""": """bos_Latn""", """Buginese""": """bug_Latn""", """Bulgarian""": """bul_Cyrl""", """Catalan""": """cat_Latn""", """Cebuano""": """ceb_Latn""", """Czech""": """ces_Latn""", """Chokwe""": """cjk_Latn""", """Central Kurdish""": """ckb_Arab""", """Crimean Tatar""": """crh_Latn""", """Welsh""": """cym_Latn""", """Danish""": """dan_Latn""", """German""": """deu_Latn""", """Southwestern Dinka""": """dik_Latn""", """Dyula""": """dyu_Latn""", """Dzongkha""": """dzo_Tibt""", """Greek""": """ell_Grek""", """English""": """eng_Latn""", """Esperanto""": """epo_Latn""", """Estonian""": """est_Latn""", """Basque""": """eus_Latn""", """Ewe""": """ewe_Latn""", """Faroese""": """fao_Latn""", """Fijian""": """fij_Latn""", """Finnish""": """fin_Latn""", """Fon""": """fon_Latn""", """French""": """fra_Latn""", """Friulian""": """fur_Latn""", """Nigerian Fulfulde""": """fuv_Latn""", """Scottish Gaelic""": """gla_Latn""", """Irish""": """gle_Latn""", """Galician""": """glg_Latn""", """Guarani""": """grn_Latn""", """Gujarati""": """guj_Gujr""", """Haitian Creole""": """hat_Latn""", """Hausa""": """hau_Latn""", """Hebrew""": """heb_Hebr""", """Hindi""": """hin_Deva""", """Chhattisgarhi""": """hne_Deva""", """Croatian""": """hrv_Latn""", """Hungarian""": """hun_Latn""", """Armenian""": """hye_Armn""", """Igbo""": """ibo_Latn""", """Ilocano""": """ilo_Latn""", """Indonesian""": """ind_Latn""", """Icelandic""": """isl_Latn""", """Italian""": """ita_Latn""", """Javanese""": """jav_Latn""", """Japanese""": """jpn_Jpan""", """Kabyle""": """kab_Latn""", """Jingpho""": """kac_Latn""", """Kamba""": """kam_Latn""", """Kannada""": """kan_Knda""", """Kashmiri Arabic""": 
"""kas_Arab""", """Kashmiri Devanagari""": """kas_Deva""", """Georgian""": """kat_Geor""", """Central Kanuri Arabic""": """knc_Arab""", """Central Kanuri Latin""": """knc_Latn""", """Kazakh""": """kaz_Cyrl""", """Kabiyè""": """kbp_Latn""", """Kabuverdianu""": """kea_Latn""", """Khmer""": """khm_Khmr""", """Kikuyu""": """kik_Latn""", """Kinyarwanda""": """kin_Latn""", """Kyrgyz""": """kir_Cyrl""", """Kimbundu""": """kmb_Latn""", """Northern Kurdish""": """kmr_Latn""", """Kikongo""": """kon_Latn""", """Korean""": """kor_Hang""", """Lao""": """lao_Laoo""", """Ligurian""": """lij_Latn""", """Limburgish""": """lim_Latn""", """Lingala""": """lin_Latn""", """Lithuanian""": """lit_Latn""", """Lombard""": """lmo_Latn""", """Latgalian""": """ltg_Latn""", """Luxembourgish""": """ltz_Latn""", """Luba-Kasai""": """lua_Latn""", """Ganda""": """lug_Latn""", """Luo""": """luo_Latn""", """Mizo""": """lus_Latn""", """Standard Latvian""": """lvs_Latn""", """Magahi""": """mag_Deva""", """Maithili""": """mai_Deva""", """Malayalam""": """mal_Mlym""", """Marathi""": """mar_Deva""", """Minangkabau Arabic """: """min_Arab""", """Minangkabau Latin""": """min_Latn""", """Macedonian""": """mkd_Cyrl""", """Plateau Malagasy""": """plt_Latn""", """Maltese""": """mlt_Latn""", """Meitei Bengali""": """mni_Beng""", """Halh Mongolian""": """khk_Cyrl""", """Mossi""": """mos_Latn""", """Maori""": """mri_Latn""", """Burmese""": """mya_Mymr""", """Dutch""": """nld_Latn""", """Norwegian Nynorsk""": """nno_Latn""", """Norwegian Bokmål""": """nob_Latn""", """Nepali""": """npi_Deva""", """Northern Sotho""": """nso_Latn""", """Nuer""": """nus_Latn""", """Nyanja""": """nya_Latn""", """Occitan""": """oci_Latn""", """West Central Oromo""": """gaz_Latn""", """Odia""": """ory_Orya""", """Pangasinan""": """pag_Latn""", """Eastern Panjabi""": """pan_Guru""", """Papiamento""": """pap_Latn""", """Western Persian""": """pes_Arab""", """Polish""": """pol_Latn""", """Portuguese""": """por_Latn""", """Dari""": """prs_Arab""", """Southern Pashto""": """pbt_Arab""", """Ayacucho Quechua""": """quy_Latn""", """Romanian""": """ron_Latn""", """Rundi""": """run_Latn""", """Russian""": """rus_Cyrl""", """Sango""": """sag_Latn""", """Sanskrit""": """san_Deva""", """Santali""": """sat_Olck""", """Sicilian""": """scn_Latn""", """Shan""": """shn_Mymr""", """Sinhala""": """sin_Sinh""", """Slovak""": """slk_Latn""", """Slovenian""": """slv_Latn""", """Samoan""": """smo_Latn""", """Shona""": """sna_Latn""", """Sindhi""": """snd_Arab""", """Somali""": """som_Latn""", """Southern Sotho""": """sot_Latn""", """Spanish""": """spa_Latn""", """Tosk Albanian""": """als_Latn""", """Sardinian""": """srd_Latn""", """Serbian""": """srp_Cyrl""", """Swati""": """ssw_Latn""", """Sundanese""": """sun_Latn""", """Swedish""": """swe_Latn""", """Swahili""": """swh_Latn""", """Silesian""": """szl_Latn""", """Tamil""": """tam_Taml""", """Tatar""": """tat_Cyrl""", """Telugu""": """tel_Telu""", """Tajik""": """tgk_Cyrl""", """Tagalog""": """tgl_Latn""", """Thai""": """tha_Thai""", """Tigrinya""": """tir_Ethi""", """Tamasheq Latin""": """taq_Latn""", """Tamasheq Tifinagh""": """taq_Tfng""", """Tok Pisin""": """tpi_Latn""", """Tswana""": """tsn_Latn""", """Tsonga""": """tso_Latn""", """Turkmen""": """tuk_Latn""", """Tumbuka""": """tum_Latn""", """Turkish""": """tur_Latn""", """Twi""": """twi_Latn""", """Central Atlas Tamazight""": """tzm_Tfng""", """Uyghur""": """uig_Arab""", """Ukrainian""": """ukr_Cyrl""", """Umbundu""": """umb_Latn""", """Urdu""": """urd_Arab""", """Northern 
Uzbek""": """uzn_Latn""",
    """Venetian""": """vec_Latn""",
    """Vietnamese""": """vie_Latn""",
    """Waray""": """war_Latn""",
    """Wolof""": """wol_Latn""",
    """Xhosa""": """xho_Latn""",
    """Eastern Yiddish""": """ydd_Hebr""",
    """Yoruba""": """yor_Latn""",
    """Yue Chinese""": """yue_Hant""",
    """Chinese Simplified""": """zho_Hans""",
    """Chinese Traditional""": """zho_Hant""",
    """Standard Malay""": """zsm_Latn""",
    """Zulu""": """zul_Latn""",
}


class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which "
        "should be the text to translate, `src_lang`, which should be the language of the text to translate, and "
        "`tgt_lang`, which should be the language of the desired output. Both `src_lang` and `tgt_lang` are "
        "written in plain English, such as 'Romanian' or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM  # imported above under its mangled name (originally AutoModelForSeq2SeqLM)
    lang_to_code = UpperCAmelCase_  # the language table defined above; its original name (LANGUAGE_CODES) was mangled
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
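# Usage sketch appended here, not from the original file: the agents helper
# `load_tool` in recent transformers releases wires this class up as the
# "translation" tool; the first call downloads the NLLB checkpoint.
from transformers import load_tool

translator = load_tool("translation")
print(translator("Bonjour tout le monde", src_lang="French", tgt_lang="English"))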
295
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    # 1. Validate that the path end and the next vertex are connected
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find an answer return the path, otherwise return an empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
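# Quick check appended here (my own toy input): a 4-vertex ring graph has the
# Hamiltonian cycle 0 -> 1 -> 2 -> 3 -> 0.
ring = [
    [0, 1, 0, 1],
    [1, 0, 1, 0],
    [0, 1, 0, 1],
    [1, 0, 1, 0],
]
assert hamilton_cycle(ring) == [0, 1, 2, 3, 0]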
295
1
import base64


def base64_encode(string: str) -> bytes:
    """Encode a UTF-8 string with Base64 (the mangled original may equally
    have been the Base85 variant; Base64 is assumed here)."""
    return base64.b64encode(string.encode("utf-8"))


def base64_decode(encoded: bytes) -> str:
    """Decode Base64 bytes back to a UTF-8 string."""
    return base64.b64decode(encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base64_encode(test)
    print(encoded)
    decoded = base64_decode(encoded)
    print(decoded)
166
import math


def check_partition_perfect(positive_integer: int) -> bool:
    """A partition 4**t = 2**t + k is perfect when the exponent t is an integer."""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    """Find the first k for which the proportion of perfect partitions
    drops below ``max_proportion`` (Project Euler problem 207)."""
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if the candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0 and perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
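# Spot checks appended here: k = 2 is a perfect partition (4**1 == 2**1 + 2),
# while k = 6 (from 4**t = 2**t + 6) has no integer exponent t.
assert check_partition_perfect(2)
assert not check_partition_perfect(6)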
299
0
'''simple docstring''' import os import shutil from pathlib import Path from typing import Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging if is_onnx_available(): import onnxruntime as ort UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { "tensor(bool)": np.bool_, "tensor(int8)": np.inta, "tensor(uint8)": np.uinta, "tensor(int16)": np.intaa, "tensor(uint16)": np.uintaa, "tensor(int32)": np.intaa, "tensor(uint32)": np.uintaa, "tensor(int64)": np.intaa, "tensor(uint64)": np.uintaa, "tensor(float16)": np.floataa, "tensor(float)": np.floataa, "tensor(double)": np.floataa, } class _a : '''simple docstring''' def __init__( self, A=None, **A ): '''simple docstring''' logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' ) SCREAMING_SNAKE_CASE : List[Any] = model SCREAMING_SNAKE_CASE : str = kwargs.get('model_save_dir', A ) SCREAMING_SNAKE_CASE : Dict = kwargs.get('latest_model_name', A ) def __call__( self, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = {k: np.array(A ) for k, v in kwargs.items()} return self.model.run(A, A ) @staticmethod def UpperCamelCase_ ( A, A=None, A=None ): '''simple docstring''' if provider is None: logger.info('No onnxruntime provider specified, using CPUExecutionProvider' ) SCREAMING_SNAKE_CASE : Optional[Any] = 'CPUExecutionProvider' return ort.InferenceSession(A, providers=[provider], sess_options=A ) def UpperCamelCase_ ( self, A, A = None, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = file_name if file_name is not None else ONNX_WEIGHTS_NAME SCREAMING_SNAKE_CASE : Optional[Any] = self.model_save_dir.joinpath(self.latest_model_name ) SCREAMING_SNAKE_CASE : List[Any] = Path(A ).joinpath(A ) try: shutil.copyfile(A, A ) except shutil.SameFileError: pass # copy external weights (for models >2GB) SCREAMING_SNAKE_CASE : Optional[int] = self.model_save_dir.joinpath(A ) if src_path.exists(): SCREAMING_SNAKE_CASE : str = Path(A ).joinpath(A ) try: shutil.copyfile(A, A ) except shutil.SameFileError: pass def UpperCamelCase_ ( self, A, **A, ): '''simple docstring''' if os.path.isfile(A ): logger.error(F"Provided path ({save_directory}) should be a directory, not a file" ) return os.makedirs(A, exist_ok=A ) # saving model weights/files self._save_pretrained(A, **A ) @classmethod def UpperCamelCase_ ( cls, A, A = None, A = None, A = False, A = None, A = None, A = None, A = None, **A, ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = file_name if file_name is not None else ONNX_WEIGHTS_NAME # load model from local directory if os.path.isdir(A ): SCREAMING_SNAKE_CASE : Any = OnnxRuntimeModel.load_model( os.path.join(A, A ), provider=A, sess_options=A ) SCREAMING_SNAKE_CASE : Dict = Path(A ) # load model from hub else: # download model SCREAMING_SNAKE_CASE : Optional[int] = hf_hub_download( repo_id=A, filename=A, use_auth_token=A, revision=A, cache_dir=A, force_download=A, ) SCREAMING_SNAKE_CASE : List[Any] = Path(A ).parent SCREAMING_SNAKE_CASE : Optional[Any] = Path(A ).name SCREAMING_SNAKE_CASE : Optional[Any] = OnnxRuntimeModel.load_model(A, provider=A, sess_options=A ) return cls(model=A, **A ) @classmethod def UpperCamelCase_ ( cls, A, A = True, A = None, A = None, **A, ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = None if len(str(A ).split('@' ) ) == 2: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = model_id.split('@' ) 
return cls._from_pretrained( model_id=A, revision=A, cache_dir=A, force_download=A, use_auth_token=A, **A, )
246
'''simple docstring''' import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, ) from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _a : '''simple docstring''' def __init__( self, A, A=2, A=3, A=4, A=2, A=7, A=True, A=True, A=True, A=True, A=99, A=36, A=3, A=4, A=37, A="gelu", A=0.1, A=0.1, A=512, A=16, A=2, A=0.02, A=6, A=6, A=3, A=4, A=None, A=1_000, ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = parent SCREAMING_SNAKE_CASE : List[Any] = batch_size SCREAMING_SNAKE_CASE : Optional[int] = num_channels SCREAMING_SNAKE_CASE : Optional[int] = image_size SCREAMING_SNAKE_CASE : List[str] = patch_size SCREAMING_SNAKE_CASE : List[Any] = text_seq_length SCREAMING_SNAKE_CASE : Tuple = is_training SCREAMING_SNAKE_CASE : Tuple = use_input_mask SCREAMING_SNAKE_CASE : Optional[int] = use_token_type_ids SCREAMING_SNAKE_CASE : Optional[Any] = use_labels SCREAMING_SNAKE_CASE : Optional[int] = vocab_size SCREAMING_SNAKE_CASE : Any = hidden_size SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads SCREAMING_SNAKE_CASE : Any = intermediate_size SCREAMING_SNAKE_CASE : List[Any] = hidden_act SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : str = max_position_embeddings SCREAMING_SNAKE_CASE : List[str] = type_vocab_size SCREAMING_SNAKE_CASE : Tuple = type_sequence_label_size SCREAMING_SNAKE_CASE : List[Any] = initializer_range SCREAMING_SNAKE_CASE : str = coordinate_size SCREAMING_SNAKE_CASE : Tuple = shape_size SCREAMING_SNAKE_CASE : Optional[Any] = num_labels SCREAMING_SNAKE_CASE : int = num_choices SCREAMING_SNAKE_CASE : str = scope SCREAMING_SNAKE_CASE : Optional[int] = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) SCREAMING_SNAKE_CASE : Union[str, Any] = text_seq_length SCREAMING_SNAKE_CASE : List[str] = (image_size // patch_size) ** 2 + 1 SCREAMING_SNAKE_CASE : int = self.text_seq_length + self.image_seq_length def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size ) SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: SCREAMING_SNAKE_CASE : str = bbox[i, j, 3] SCREAMING_SNAKE_CASE : Optional[Any] = bbox[i, j, 1] SCREAMING_SNAKE_CASE : Tuple = t if bbox[i, j, 2] < bbox[i, j, 0]: SCREAMING_SNAKE_CASE : 
Optional[Any] = bbox[i, j, 2] SCREAMING_SNAKE_CASE : Any = bbox[i, j, 0] SCREAMING_SNAKE_CASE : Tuple = t SCREAMING_SNAKE_CASE : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE : Any = None if self.use_input_mask: SCREAMING_SNAKE_CASE : List[Any] = random_attention_mask([self.batch_size, self.text_seq_length] ) SCREAMING_SNAKE_CASE : List[Any] = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size ) SCREAMING_SNAKE_CASE : Optional[Any] = None SCREAMING_SNAKE_CASE : Tuple = None if self.use_labels: SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size], self.type_sequence_label_size ) SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels ) SCREAMING_SNAKE_CASE : int = LayoutLMvaConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def UpperCamelCase_ ( self, A, A, A, A, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = LayoutLMvaModel(config=A ) model.to(A ) model.eval() # text + image SCREAMING_SNAKE_CASE : Optional[int] = model(A, pixel_values=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = model( A, bbox=A, pixel_values=A, attention_mask=A, token_type_ids=A ) SCREAMING_SNAKE_CASE : List[str] = model(A, bbox=A, pixel_values=A, token_type_ids=A ) SCREAMING_SNAKE_CASE : Optional[int] = model(A, bbox=A, pixel_values=A ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) # text only SCREAMING_SNAKE_CASE : List[Any] = model(A ) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only SCREAMING_SNAKE_CASE : List[str] = model(pixel_values=A ) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size) ) def UpperCamelCase_ ( self, A, A, A, A, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels SCREAMING_SNAKE_CASE : List[str] = LayoutLMvaForSequenceClassification(A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : Dict = model( A, bbox=A, pixel_values=A, attention_mask=A, token_type_ids=A, labels=A, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self, A, A, A, A, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.num_labels SCREAMING_SNAKE_CASE : str = LayoutLMvaForTokenClassification(config=A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : str = model( A, bbox=A, pixel_values=A, attention_mask=A, token_type_ids=A, labels=A, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels) ) def UpperCamelCase_ ( self, A, A, A, A, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = 
LayoutLMvaForQuestionAnswering(config=A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : Optional[int] = model( A, bbox=A, pixel_values=A, attention_mask=A, token_type_ids=A, start_positions=A, end_positions=A, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ) : Any = config_and_inputs SCREAMING_SNAKE_CASE : Dict = { 'input_ids': input_ids, 'bbox': bbox, 'pixel_values': pixel_values, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_torch class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : Optional[int] = False A : List[str] = False A : Union[str, Any] = False A : Optional[Any] = ( ( LayoutLMvaModel, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaForQuestionAnswering, ) if is_torch_available() else () ) A : List[Any] = ( {'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel} if is_torch_available() else {} ) def UpperCamelCase_ ( self, A, A, A, A, A ): '''simple docstring''' return True def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = LayoutLMvaModelTester(self ) SCREAMING_SNAKE_CASE : Optional[Any] = ConfigTester(self, config_class=A, hidden_size=37 ) def UpperCamelCase_ ( self, A, A, A=False ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = copy.deepcopy(A ) if model_class in get_values(A ): SCREAMING_SNAKE_CASE : Optional[int] = { k: v.unsqueeze(1 ).expand(-1, self.model_tester.num_choices, -1 ).contiguous() if isinstance(A, torch.Tensor ) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(A ): SCREAMING_SNAKE_CASE : Optional[Any] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=A ) elif model_class in get_values(A ): SCREAMING_SNAKE_CASE : Dict = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=A ) SCREAMING_SNAKE_CASE : Dict = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=A ) elif model_class in [ *get_values(A ), ]: SCREAMING_SNAKE_CASE : str = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=A ) elif model_class in [ *get_values(A ), ]: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=A, ) return inputs_dict def UpperCamelCase_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: SCREAMING_SNAKE_CASE : List[str] = type self.model_tester.create_and_check_model(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE : Optional[Any] = LayoutLMvaModel.from_pretrained(A ) self.assertIsNotNone(A ) def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch class _a ( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self ): '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=A ) if is_vision_available() else None @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ).to(A ) SCREAMING_SNAKE_CASE : Any = self.default_image_processor SCREAMING_SNAKE_CASE : Any = prepare_img() SCREAMING_SNAKE_CASE : Dict = image_processor(images=A, return_tensors='pt' ).pixel_values.to(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[1, 2]] ) SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 ) # forward pass SCREAMING_SNAKE_CASE : Union[str, Any] = model( input_ids=input_ids.to(A ), bbox=bbox.to(A ), pixel_values=pixel_values.to(A ), ) # verify the logits SCREAMING_SNAKE_CASE : str = torch.Size((1, 199, 768) ) self.assertEqual(outputs.last_hidden_state.shape, A ) SCREAMING_SNAKE_CASE : Tuple = torch.tensor( [[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] ).to(A ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], A, atol=1E-4 ) )
246
1
# This code is adapted from OpenAI's release # https://github.com/openai/human-eval/blob/master/human_eval/execution.py import contextlib import faulthandler import io import multiprocessing import os import platform import signal import tempfile def lowerCamelCase__ ( _a , _a , _a , _a): SCREAMING_SNAKE_CASE : str = multiprocessing.Manager() SCREAMING_SNAKE_CASE : Tuple = manager.list() SCREAMING_SNAKE_CASE : Tuple = multiprocessing.Process(target=_a , args=(check_program, result, timeout)) p.start() p.join(timeout=timeout + 1) if p.is_alive(): p.kill() if not result: result.append("timed out") return { "task_id": task_id, "passed": result[0] == "passed", "result": result[0], "completion_id": completion_id, } def lowerCamelCase__ ( _a , _a , _a): with create_tempdir(): # These system calls are needed when cleaning up tempdir. import os import shutil SCREAMING_SNAKE_CASE : Tuple = shutil.rmtree SCREAMING_SNAKE_CASE : Any = os.rmdir SCREAMING_SNAKE_CASE : Tuple = os.chdir # Disable functionalities that can make destructive changes to the test. reliability_guard() # Run program. try: SCREAMING_SNAKE_CASE : List[Any] = {} with swallow_io(): with time_limit(_a): exec(_a , _a) result.append("passed") except TimeoutException: result.append("timed out") except BaseException as e: result.append(f"failed: {e}") # Needed for cleaning up. SCREAMING_SNAKE_CASE : Optional[int] = rmtree SCREAMING_SNAKE_CASE : Tuple = rmdir SCREAMING_SNAKE_CASE : List[str] = chdir @contextlib.contextmanager def lowerCamelCase__ ( _a): def signal_handler(_a , _a): raise TimeoutException("Timed out!") signal.setitimer(signal.ITIMER_REAL , _a) signal.signal(signal.SIGALRM , _a) try: yield finally: signal.setitimer(signal.ITIMER_REAL , 0) @contextlib.contextmanager def lowerCamelCase__ ( ): SCREAMING_SNAKE_CASE : Union[str, Any] = WriteOnlyStringIO() with contextlib.redirect_stdout(_a): with contextlib.redirect_stderr(_a): with redirect_stdin(_a): yield @contextlib.contextmanager def lowerCamelCase__ ( ): with tempfile.TemporaryDirectory() as dirname: with chdir(_a): yield dirname class _UpperCamelCase ( lowerCAmelCase_ ): '''simple docstring''' pass class _UpperCamelCase ( io.StringIO ): '''simple docstring''' def __UpperCamelCase ( self : Optional[Any] , *a : Optional[Any] , **a : Union[str, Any] ) -> List[Any]: """simple docstring""" raise OSError def __UpperCamelCase ( self : Any , *a : Optional[int] , **a : str ) -> Union[str, Any]: """simple docstring""" raise OSError def __UpperCamelCase ( self : Tuple , *a : str , **a : Tuple ) -> Union[str, Any]: """simple docstring""" raise OSError def __UpperCamelCase ( self : List[str] , *a : Union[str, Any] , **a : int ) -> int: """simple docstring""" return False class _UpperCamelCase ( contextlib._RedirectStream ): # type: ignore '''simple docstring''' lowerCamelCase__ ="stdin" @contextlib.contextmanager def lowerCamelCase__ ( _a): if root == ".": yield return SCREAMING_SNAKE_CASE : Any = os.getcwd() os.chdir(_a) try: yield except BaseException as exc: raise exc finally: os.chdir(_a) def lowerCamelCase__ ( _a=None): if maximum_memory_bytes is not None: import resource resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes)) resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes)) if not platform.uname().system == "Darwin": resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes)) faulthandler.disable() import builtins SCREAMING_SNAKE_CASE : Optional[Any] = None SCREAMING_SNAKE_CASE : 
Union[str, Any] = None import os SCREAMING_SNAKE_CASE : Optional[int] = '''1''' SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : Dict = None SCREAMING_SNAKE_CASE : Optional[Any] = None SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : Tuple = None SCREAMING_SNAKE_CASE : Tuple = None SCREAMING_SNAKE_CASE : str = None SCREAMING_SNAKE_CASE : str = None SCREAMING_SNAKE_CASE : int = None SCREAMING_SNAKE_CASE : Any = None SCREAMING_SNAKE_CASE : str = None SCREAMING_SNAKE_CASE : Dict = None SCREAMING_SNAKE_CASE : Tuple = None SCREAMING_SNAKE_CASE : Union[str, Any] = None SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : Any = None SCREAMING_SNAKE_CASE : int = None SCREAMING_SNAKE_CASE : str = None SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : Tuple = None SCREAMING_SNAKE_CASE : str = None SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : str = None SCREAMING_SNAKE_CASE : List[Any] = None SCREAMING_SNAKE_CASE : Union[str, Any] = None import shutil SCREAMING_SNAKE_CASE : str = None SCREAMING_SNAKE_CASE : Any = None SCREAMING_SNAKE_CASE : Any = None import subprocess SCREAMING_SNAKE_CASE : List[Any] = None # type: ignore SCREAMING_SNAKE_CASE : Optional[int] = None import sys SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : Optional[Any] = None SCREAMING_SNAKE_CASE : int = None SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : List[Any] = None
76
import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor


logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
142
0
'''simple docstring''' import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class lowercase__ ( unittest.TestCase ): def UpperCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' _UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) _UpperCamelCase : Tuple = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(_a ) _UpperCamelCase : Optional[Any] = -1 _UpperCamelCase : str = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(_a ) _UpperCamelCase : Tuple = model.generate(_a ,max_new_tokens=10 ,do_sample=_a ) _UpperCamelCase : List[str] = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: _UpperCamelCase : List[Any] = TextStreamer(_a ) model.generate(_a ,max_new_tokens=10 ,do_sample=_a ,streamer=_a ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _UpperCamelCase : Union[str, Any] = cs.out[:-1] self.assertEqual(_a ,_a ) def UpperCamelCase_ ( self : str ): '''simple docstring''' _UpperCamelCase : int = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) _UpperCamelCase : Tuple = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(_a ) _UpperCamelCase : Dict = -1 _UpperCamelCase : List[Any] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(_a ) _UpperCamelCase : List[str] = model.generate(_a ,max_new_tokens=10 ,do_sample=_a ) _UpperCamelCase : Union[str, Any] = tokenizer.decode(greedy_ids[0] ) _UpperCamelCase : Union[str, Any] = TextIteratorStreamer(_a ) _UpperCamelCase : Optional[int] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} _UpperCamelCase : Union[str, Any] = Thread(target=model.generate ,kwargs=_a ) thread.start() _UpperCamelCase : Any = "" for new_text in streamer: streamer_text += new_text self.assertEqual(_a ,_a ) def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' _UpperCamelCase : str = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) _UpperCamelCase : Dict = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(_a ) _UpperCamelCase : List[str] = -1 _UpperCamelCase : str = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(_a ) _UpperCamelCase : List[Any] = model.generate(_a ,max_new_tokens=10 ,do_sample=_a ) _UpperCamelCase : Tuple = greedy_ids[:, input_ids.shape[1] :] _UpperCamelCase : List[Any] = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: _UpperCamelCase : Dict = TextStreamer(_a ,skip_prompt=_a ) model.generate(_a ,max_new_tokens=10 ,do_sample=_a ,streamer=_a ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _UpperCamelCase : Union[str, Any] = cs.out[:-1] self.assertEqual(_a ,_a ) def UpperCamelCase_ ( self : Optional[int] ): '''simple docstring''' # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. 
Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them _UpperCamelCase : Optional[Any] = AutoTokenizer.from_pretrained('distilgpt2' ) _UpperCamelCase : str = AutoModelForCausalLM.from_pretrained('distilgpt2' ).to(_a ) _UpperCamelCase : Any = -1 _UpperCamelCase : Optional[int] = torch.ones((1, 5) ,device=_a ).long() * model.config.bos_token_id with CaptureStdout() as cs: _UpperCamelCase : Optional[int] = TextStreamer(_a ,skip_special_tokens=_a ) model.generate(_a ,max_new_tokens=1 ,do_sample=_a ,streamer=_a ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _UpperCamelCase : Union[str, Any] = cs.out[:-1] # Remove the final "\n" _UpperCamelCase : int = tokenizer(_a ,return_tensors='pt' ) self.assertEqual(streamer_text_tokenized.input_ids.shape ,(1, 1) ) def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' _UpperCamelCase : Optional[Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) _UpperCamelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(_a ) _UpperCamelCase : Tuple = -1 _UpperCamelCase : Optional[int] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(_a ) _UpperCamelCase : Any = TextIteratorStreamer(_a ,timeout=0.0_0_1 ) _UpperCamelCase : Dict = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} _UpperCamelCase : int = Thread(target=model.generate ,kwargs=_a ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(_a ): _UpperCamelCase : Tuple = "" for new_text in streamer: streamer_text += new_text
370
'''simple docstring''' class lowercase__ : def __init__( self : List[str] ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Any ): '''simple docstring''' _UpperCamelCase : Dict = None _UpperCamelCase : List[Any] = None _UpperCamelCase : int = graph self._normalize_graph(lowerCamelCase__ ,lowerCamelCase__ ) _UpperCamelCase : List[str] = len(lowerCamelCase__ ) _UpperCamelCase : Tuple = None def UpperCamelCase_ ( self : Any ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Any ): '''simple docstring''' if sources is int: _UpperCamelCase : Optional[int] = [sources] if sinks is int: _UpperCamelCase : Union[str, Any] = [sinks] if len(lowerCamelCase__ ) == 0 or len(lowerCamelCase__ ) == 0: return _UpperCamelCase : List[str] = sources[0] _UpperCamelCase : str = sinks[0] # make fake vertex if there are more # than one source or sink if len(lowerCamelCase__ ) > 1 or len(lowerCamelCase__ ) > 1: _UpperCamelCase : Dict = 0 for i in sources: max_input_flow += sum(self.graph[i] ) _UpperCamelCase : Tuple = len(self.graph ) + 1 for room in self.graph: room.insert(0 ,0 ) self.graph.insert(0 ,[0] * size ) for i in sources: _UpperCamelCase : List[Any] = max_input_flow _UpperCamelCase : Tuple = 0 _UpperCamelCase : int = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: _UpperCamelCase : Optional[int] = max_input_flow _UpperCamelCase : Optional[int] = size - 1 def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' if self.maximum_flow_algorithm is None: raise Exception('You need to set maximum flow algorithm before.' ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def UpperCamelCase_ ( self : Any ,lowerCamelCase__ : Any ): '''simple docstring''' _UpperCamelCase : str = algorithm(self ) class lowercase__ : def __init__( self : List[str] ,lowerCamelCase__ : Optional[int] ): '''simple docstring''' _UpperCamelCase : Optional[Any] = flow_network _UpperCamelCase : List[str] = flow_network.verticesCount _UpperCamelCase : List[str] = flow_network.sourceIndex _UpperCamelCase : Any = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that _UpperCamelCase : List[Any] = flow_network.graph _UpperCamelCase : Any = False def UpperCamelCase_ ( self : str ): '''simple docstring''' if not self.executed: self._algorithm() _UpperCamelCase : Any = True def UpperCamelCase_ ( self : Any ): '''simple docstring''' pass class lowercase__ ( lowercase ): def __init__( self : Union[str, Any] ,lowerCamelCase__ : List[str] ): '''simple docstring''' super().__init__(lowerCamelCase__ ) # use this to save your result _UpperCamelCase : Tuple = -1 def UpperCamelCase_ ( self : Dict ): '''simple docstring''' if not self.executed: raise Exception('You should execute algorithm before using its result!' 
) return self.maximum_flow class lowercase__ ( lowercase ): def __init__( self : Optional[Any] ,lowerCamelCase__ : Dict ): '''simple docstring''' super().__init__(lowerCamelCase__ ) _UpperCamelCase : Dict = [[0] * self.verticies_count for i in range(self.verticies_count )] _UpperCamelCase : Optional[Any] = [0] * self.verticies_count _UpperCamelCase : List[str] = [0] * self.verticies_count def UpperCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' _UpperCamelCase : Dict = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule _UpperCamelCase : List[str] = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list _UpperCamelCase : int = 0 while i < len(lowerCamelCase__ ): _UpperCamelCase : List[Any] = vertices_list[i] _UpperCamelCase : str = self.heights[vertex_index] self.process_vertex(lowerCamelCase__ ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 ,vertices_list.pop(lowerCamelCase__ ) ) _UpperCamelCase : Dict = 0 else: i += 1 _UpperCamelCase : Optional[Any] = sum(self.preflow[self.source_index] ) def UpperCamelCase_ ( self : Union[str, Any] ,lowerCamelCase__ : Dict ): '''simple docstring''' while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(lowerCamelCase__ ,lowerCamelCase__ ) self.relabel(lowerCamelCase__ ) def UpperCamelCase_ ( self : Tuple ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : List[str] ): '''simple docstring''' _UpperCamelCase : Union[str, Any] = min( self.excesses[from_index] ,self.graph[from_index][to_index] - self.preflow[from_index][to_index] ,) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def UpperCamelCase_ ( self : List[str] ,lowerCamelCase__ : Optional[int] ): '''simple docstring''' _UpperCamelCase : Tuple = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): _UpperCamelCase : List[Any] = self.heights[to_index] if min_height is not None: _UpperCamelCase : Any = min_height + 1 if __name__ == "__main__": snake_case_ : List[str] = [0] snake_case_ : int = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] snake_case_ : int = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network snake_case_ : List[Any] = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate snake_case_ : Tuple = flow_network.find_maximum_flow() print(F"""maximum flow is {maximum_flow}""")
236
0
import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class snake_case__ (_UpperCamelCase ): """simple docstring""" def __init__( self : Tuple , __lowerCamelCase : str = "▁" , __lowerCamelCase : bool = True , __lowerCamelCase : Union[str, AddedToken] = "<unk>" , __lowerCamelCase : Union[str, AddedToken] = "</s>" , __lowerCamelCase : Union[str, AddedToken] = "<pad>" , ) -> int: a = { "pad": {"id": 0, "token": pad_token}, "eos": {"id": 1, "token": eos_token}, "unk": {"id": 2, "token": unk_token}, } a = [None] * len(self.special_tokens ) for token_dict in self.special_tokens.values(): a = token_dict["token"] a = Tokenizer(Unigram() ) a = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(" {2,}" ) , " " ), normalizers.Lowercase(), ] ) a = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=__lowerCamelCase , add_prefix_space=__lowerCamelCase ), pre_tokenizers.Digits(individual_digits=__lowerCamelCase ), pre_tokenizers.Punctuation(), ] ) a = decoders.Metaspace(replacement=__lowerCamelCase , add_prefix_space=__lowerCamelCase ) a = TemplateProcessing( single=f"""$A {self.special_tokens["eos"]["token"]}""" , special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])] , ) a = { "model": "SentencePieceUnigram", "replacement": replacement, "add_prefix_space": add_prefix_space, } super().__init__(__lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : Union[str, List[str]] , __lowerCamelCase : int = 80_00 , __lowerCamelCase : bool = True , ) -> Optional[Any]: a = trainers.UnigramTrainer( vocab_size=__lowerCamelCase , special_tokens=self.special_tokens_list , show_progress=__lowerCamelCase , ) if isinstance(__lowerCamelCase , __lowerCamelCase ): a = [files] self._tokenizer.train(__lowerCamelCase , trainer=__lowerCamelCase ) self.add_unk_id() def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Union[Iterator[str], Iterator[Iterator[str]]] , __lowerCamelCase : int = 80_00 , __lowerCamelCase : bool = True , ) -> Optional[int]: a = trainers.UnigramTrainer( vocab_size=__lowerCamelCase , special_tokens=self.special_tokens_list , show_progress=__lowerCamelCase , ) self._tokenizer.train_from_iterator(__lowerCamelCase , trainer=__lowerCamelCase ) self.add_unk_id() def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]: a = json.loads(self._tokenizer.to_str() ) a = self.special_tokens["unk"]["id"] a = Tokenizer.from_str(json.dumps(__lowerCamelCase ) )
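# Usage sketch appended here, not from the original file. The class and method
# names in the block above are mangled; the upstream version of this file (the
# Flax T5 example in the transformers repo) names them
# SentencePieceUnigramTokenizer / .train, which is assumed below. The corpus
# path is a placeholder.
tokenizer = SentencePieceUnigramTokenizer()
tokenizer.train(files=["corpus.txt"], vocab_size=8000)
tokenizer.save("unigram-tokenizer.json")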
107
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Logistic sigmoid, applied element-wise."""
    return 1 / (1 + np.exp(-vector))


def gaussian_error_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Sigmoid approximation of GELU: x * sigmoid(1.702 * x)."""
    return vector * sigmoid(1.702 * vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
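# Spot check appended here (values are my own): sigmoid(0) = 0.5, so the GELU
# approximation vanishes at the origin and tracks the identity for large x.
x = np.array([-4.0, 0.0, 4.0])
print(sigmoid(x))                     # ~[0.018, 0.5, 0.982]
print(gaussian_error_linear_unit(x))  # ~[-0.004, 0.0, 3.996]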
107
1
def is_arithmetic_series(series: list) -> bool:
    """Return True when consecutive terms share a common difference."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """Return the arithmetic mean of the series."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
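# Spot checks appended here (my own values):
assert is_arithmetic_series([2, 4, 6])
assert not is_arithmetic_series([2, 4, 7])
assert arithmetic_mean([2, 4, 6]) == 4.0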
350
"""ERNIE-M model configuration."""
from __future__ import annotations

from typing import Dict

from ...configuration_utils import PretrainedConfig


ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}


class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
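# Minimal usage sketch appended here: `num_classes` resolves through
# attribute_map to `num_labels` on the base PretrainedConfig.
config = ErnieMConfig(hidden_size=256, num_hidden_layers=4, num_labels=3)
print(config.num_classes)  # 3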
294
0
def solution(n: int = 1000) -> int:
    """Sum all multiples of 3 or 5 below ``n`` (Project Euler problem 1).
    Multiples of 15 satisfy both tests but are counted exactly once by the
    ``or``, so no correction term is needed."""
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
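# Equivalent closed form appended here (my addition, not part of the original
# solution): sum each arithmetic progression directly and correct for the
# double-counted multiples of 15.
def solution_closed_form(n: int = 1000) -> int:
    def progression_sum(k: int) -> int:
        m = (n - 1) // k  # number of multiples of k below n
        return k * m * (m + 1) // 2

    return progression_sum(3) + progression_sum(5) - progression_sum(15)


assert solution_closed_form() == solution() == 233168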
300
import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging _lowerCamelCase : List[str] = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ): '''simple docstring''' def __init__( self : Dict , lowercase : Union[List[ControlNetModel], Tuple[ControlNetModel]] ): '''simple docstring''' super().__init__() _snake_case = nn.ModuleList(lowercase ) def A ( self : Optional[int] , lowercase : torch.FloatTensor , lowercase : Union[torch.Tensor, float, int] , lowercase : torch.Tensor , lowercase : List[torch.tensor] , lowercase : List[float] , lowercase : Optional[torch.Tensor] = None , lowercase : Optional[torch.Tensor] = None , lowercase : Optional[torch.Tensor] = None , lowercase : Optional[Dict[str, Any]] = None , lowercase : bool = False , lowercase : bool = True , ): '''simple docstring''' for i, (image, scale, controlnet) in enumerate(zip(lowercase , lowercase , self.nets ) ): _snake_case , _snake_case = controlnet( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) # merge samples if i == 0: _snake_case , _snake_case = down_samples, mid_sample else: _snake_case = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(lowercase , lowercase ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def A ( self : Dict , lowercase : Union[str, os.PathLike] , lowercase : bool = True , lowercase : Callable = None , lowercase : bool = False , lowercase : Optional[str] = None , ): '''simple docstring''' _snake_case = 0 _snake_case = save_directory for controlnet in self.nets: controlnet.save_pretrained( lowercase , is_main_process=lowercase , save_function=lowercase , safe_serialization=lowercase , variant=lowercase , ) idx += 1 _snake_case = model_path_to_save + f'''_{idx}''' @classmethod def A ( cls : Any , lowercase : Optional[Union[str, os.PathLike]] , **lowercase : List[str] ): '''simple docstring''' _snake_case = 0 _snake_case = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... _snake_case = pretrained_model_path while os.path.isdir(lowercase ): _snake_case = ControlNetModel.from_pretrained(lowercase , **lowercase ) controlnets.append(lowercase ) idx += 1 _snake_case = pretrained_model_path + f'''_{idx}''' logger.info(f'''{len(lowercase )} controlnets loaded from {pretrained_model_path}.''' ) if len(lowercase ) == 0: raise ValueError( f'''No ControlNets found under {os.path.dirname(lowercase )}. Expected at least {pretrained_model_path + '_0'}.''' ) return cls(lowercase )
282
0
"""simple docstring""" from math import loga def __UpperCAmelCase ( __lowerCamelCase ) -> int: if a < 0: raise ValueError('''Input value must be a positive integer''' ) elif isinstance(__lowerCamelCase , __lowerCamelCase ): raise TypeError('''Input value must be a \'int\' type''' ) return 0 if (a == 0) else int(loga(a & -a ) ) if __name__ == "__main__": import doctest doctest.testmod()
302
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase_ = { 'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'], 'tokenization_roberta': ['RobertaTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['RobertaTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'RobertaForCausalLM', 'RobertaForMaskedLM', 'RobertaForMultipleChoice', 'RobertaForQuestionAnswering', 'RobertaForSequenceClassification', 'RobertaForTokenClassification', 'RobertaModel', 'RobertaPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFRobertaForCausalLM', 'TFRobertaForMaskedLM', 'TFRobertaForMultipleChoice', 'TFRobertaForQuestionAnswering', 'TFRobertaForSequenceClassification', 'TFRobertaForTokenClassification', 'TFRobertaMainLayer', 'TFRobertaModel', 'TFRobertaPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'FlaxRobertaForCausalLM', 'FlaxRobertaForMaskedLM', 'FlaxRobertaForMultipleChoice', 'FlaxRobertaForQuestionAnswering', 'FlaxRobertaForSequenceClassification', 'FlaxRobertaForTokenClassification', 'FlaxRobertaModel', 'FlaxRobertaPreTrainedModel', ] if TYPE_CHECKING: from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig from .tokenization_roberta import RobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roberta_fast import RobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta import ( ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, RobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta import ( TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, TFRobertaForSequenceClassification, TFRobertaForTokenClassification, TFRobertaMainLayer, TFRobertaModel, TFRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, FlaxRobertaPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
302
1
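A quick usage sketch for the lowest-set-bit helper in the first row above (it relies on the function name and import alias as fixed there; `0b10100`'s lowest set bit sits at index 2):

# n & -n keeps only the lowest set bit of n
assert 0b10100 & -0b10100 == 0b100
assert __UpperCAmelCase(0b10100) == 2
assert __UpperCAmelCase(1) == 0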
'''simple docstring'''

from math import factorial

DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial(n) -> int:
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    # No number with more than seven digits can equal its digit-factorial sum,
    # so 7 * 9! + 1 is a safe upper bound for the search.
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)


if __name__ == "__main__":
    print(f"{solution() = }")
35
'''simple docstring'''

import argparse
import json

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification


def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21_841
    else:
        num_classes = 1_000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config


def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                # the fused qkv matrix is split into query / key / value thirds
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swin_name",
        default="swin_tiny_patch4_window7_224",
        type=str,
        help="Name of the Swin timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
35
1
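A small sanity check for the digit-factorial search in the row above: 145 and 40585 are the two known "curious numbers" the search should find (for example 1! + 4! + 5! = 1 + 24 + 120 = 145):

from math import factorial

assert sum(factorial(int(d)) for d in "145") == 145
assert sum(factorial(int(d)) for d in "40585") == 40585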
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}


if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
33
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
33
1
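The two `__init__` files in the row above both defer imports through `_LazyModule`. A minimal sketch of the idea (a simplified stand-in, not the real transformers implementation):

import importlib
import types


class _MiniLazyModule(types.ModuleType):
    """Simplified stand-in: resolve attributes to submodules on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Invert {submodule: [attribute, ...]} into {attribute: submodule}.
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # Import the owning submodule only when the attribute is first touched.
        module = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        return getattr(module, attr)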
def kth_permutation(k, n):
    """Return the k-th (0-indexed) lexicographic permutation of range(n)."""
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])
    return permutation


if __name__ == "__main__":
    import doctest

    doctest.testmod()
21
from typing import Dict

from .base import GenericTensor, Pipeline


class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        model_inputs = self.tokenizer(inputs, return_tensors=self.framework, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor: logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
327
0
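Exercising the factorial-number-system permutation from the row above (using the `kth_permutation` name introduced there):

assert kth_permutation(0, 3) == [0, 1, 2]  # first permutation of range(3)
assert kth_permutation(5, 3) == [2, 1, 0]  # last permutation: k = 3! - 1 = 5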
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class lowercase_ ( unittest.TestCase ): """simple docstring""" def __init__( self : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any]=7 , __lowerCamelCase : str=3 , __lowerCamelCase : Union[str, Any]=1_8 , __lowerCamelCase : Union[str, Any]=3_0 , __lowerCamelCase : str=4_0_0 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : List[Any]=True , ): """simple docstring""" _SCREAMING_SNAKE_CASE = size if size is not None else {"height": 1_8, "width": 1_8} _SCREAMING_SNAKE_CASE = parent _SCREAMING_SNAKE_CASE = batch_size _SCREAMING_SNAKE_CASE = num_channels _SCREAMING_SNAKE_CASE = image_size _SCREAMING_SNAKE_CASE = min_resolution _SCREAMING_SNAKE_CASE = max_resolution _SCREAMING_SNAKE_CASE = do_resize _SCREAMING_SNAKE_CASE = size _SCREAMING_SNAKE_CASE = apply_ocr def lowerCAmelCase_ ( self : int ): """simple docstring""" return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class lowercase_ ( A , unittest.TestCase ): """simple docstring""" lowerCamelCase_ = LayoutLMvaImageProcessor if is_pytesseract_available() else None def lowerCAmelCase_ ( self : Any ): """simple docstring""" _SCREAMING_SNAKE_CASE = LayoutLMvaImageProcessingTester(self ) @property def lowerCAmelCase_ ( self : Tuple ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase_ ( self : Optional[Any] ): """simple docstring""" _SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) ) self.assertTrue(hasattr(__lowerCamelCase , "size" ) ) self.assertTrue(hasattr(__lowerCamelCase , "apply_ocr" ) ) def lowerCAmelCase_ ( self : Tuple ): """simple docstring""" _SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"height": 1_8, "width": 1_8} ) _SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 ) self.assertEqual(image_processor.size , {"height": 4_2, "width": 4_2} ) def lowerCAmelCase_ ( self : Any ): """simple docstring""" pass def lowerCAmelCase_ ( self : Any ): """simple docstring""" # Initialize image_processing _SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , Image.Image ) # Test not batched input _SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) self.assertIsInstance(encoding.words , __lowerCamelCase ) self.assertIsInstance(encoding.boxes , __lowerCamelCase ) # Test batched _SCREAMING_SNAKE_CASE = image_processing(__lowerCamelCase , 
return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def lowerCAmelCase_ ( self : List[str] ): """simple docstring""" # Initialize image_processing _SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , np.ndarray ) # Test not batched input _SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched _SCREAMING_SNAKE_CASE = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def lowerCAmelCase_ ( self : List[Any] ): """simple docstring""" # Initialize image_processing _SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , torch.Tensor ) # Test not batched input _SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched _SCREAMING_SNAKE_CASE = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def lowerCAmelCase_ ( self : Optional[Any] ): """simple docstring""" # with apply_OCR = True _SCREAMING_SNAKE_CASE = LayoutLMvaImageProcessor() from datasets import load_dataset _SCREAMING_SNAKE_CASE = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" ) _SCREAMING_SNAKE_CASE = Image.open(ds[0]["file"] ).convert("RGB" ) _SCREAMING_SNAKE_CASE = image_processing(__lowerCamelCase , return_tensors="pt" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 _SCREAMING_SNAKE_CASE = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", 
"Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231 _SCREAMING_SNAKE_CASE = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 
4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , __lowerCamelCase ) self.assertListEqual(encoding.boxes , __lowerCamelCase ) # with apply_OCR = False _SCREAMING_SNAKE_CASE = LayoutLMvaImageProcessor(apply_ocr=__lowerCamelCase ) _SCREAMING_SNAKE_CASE = image_processing(__lowerCamelCase , return_tensors="pt" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
111
'''simple docstring''' import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger lowerCamelCase_ = '<<<<<<< This should probably be modified because it mentions: ' lowerCamelCase_ = '=======\n>>>>>>>\n' lowerCamelCase_ = [ 'TextEncoderConfig', 'ByteTextEncoder', 'SubwordTextEncoder', 'encoder_config', 'maybe_build_from_corpus', 'manual_dir', ] lowerCamelCase_ = [ # (pattern, replacement) # Order is important here for some replacements (r'tfds\.core', r'datasets'), (r'tf\.io\.gfile\.GFile', r'open'), (r'tf\.([\w\d]+)', r'datasets.Value(\'\1\')'), (r'tfds\.features\.Text\(\)', r'datasets.Value(\'string\')'), (r'tfds\.features\.Text\(', r'datasets.Value(\'string\'),'), (r'features\s*=\s*tfds.features.FeaturesDict\(', r'features=datasets.Features('), (r'tfds\.features\.FeaturesDict\(', r'dict('), (r'The TensorFlow Datasets Authors', r'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'), (r'tfds\.', r'datasets.'), (r'dl_manager\.manual_dir', r'self.config.data_dir'), (r'self\.builder_config', r'self.config'), ] def SCREAMING_SNAKE_CASE_ ( __A : Namespace ) -> List[Any]: return ConvertCommand(args.tfds_path , args.datasets_directory ) class lowercase_ ( A ): """simple docstring""" @staticmethod def lowerCAmelCase_ ( __lowerCamelCase : ArgumentParser ): """simple docstring""" _SCREAMING_SNAKE_CASE = parser.add_parser( "convert" , help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." , ) train_parser.add_argument( "--tfds_path" , type=__lowerCamelCase , required=__lowerCamelCase , help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." , ) train_parser.add_argument( "--datasets_directory" , type=__lowerCamelCase , required=__lowerCamelCase , help="Path to the HuggingFace Datasets folder." ) train_parser.set_defaults(func=__lowerCamelCase ) def __init__( self : Dict , __lowerCamelCase : str , __lowerCamelCase : str , *__lowerCamelCase : Tuple ): """simple docstring""" _SCREAMING_SNAKE_CASE = get_logger("datasets-cli/converting" ) _SCREAMING_SNAKE_CASE = tfds_path _SCREAMING_SNAKE_CASE = datasets_directory def lowerCAmelCase_ ( self : List[Any] ): """simple docstring""" if os.path.isdir(self._tfds_path ): _SCREAMING_SNAKE_CASE = os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): _SCREAMING_SNAKE_CASE = os.path.dirname(self._tfds_path ) else: raise ValueError("--tfds_path is neither a directory nor a file. Please check path." 
) _SCREAMING_SNAKE_CASE = os.path.abspath(self._datasets_directory ) self._logger.info(F"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" ) _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = {} if os.path.isdir(self._tfds_path ): _SCREAMING_SNAKE_CASE = os.listdir(__lowerCamelCase ) else: _SCREAMING_SNAKE_CASE = [os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(F"""Looking at file {f_name}""" ) _SCREAMING_SNAKE_CASE = os.path.join(__lowerCamelCase , __lowerCamelCase ) _SCREAMING_SNAKE_CASE = os.path.join(__lowerCamelCase , __lowerCamelCase ) if not os.path.isfile(__lowerCamelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info("Skipping file" ) continue with open(__lowerCamelCase , encoding="utf-8" ) as f: _SCREAMING_SNAKE_CASE = f.readlines() _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = [] for line in lines: _SCREAMING_SNAKE_CASE = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: _SCREAMING_SNAKE_CASE = "import datasets\n" elif "import tensorflow" in out_line: # order is important here _SCREAMING_SNAKE_CASE = "" continue elif "from absl import logging" in out_line: _SCREAMING_SNAKE_CASE = "from datasets import logging\n" elif "getLogger" in out_line: _SCREAMING_SNAKE_CASE = out_line.replace("getLogger" , "get_logger" ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = list(filter(lambda __lowerCamelCase : e in out_line , __lowerCamelCase ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(__lowerCamelCase ) + "\n" ) out_lines.append(__lowerCamelCase ) out_lines.append(__lowerCamelCase ) continue else: for pattern, replacement in TO_CONVERT: _SCREAMING_SNAKE_CASE = re.sub(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: _SCREAMING_SNAKE_CASE = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)" , __lowerCamelCase ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) ) _SCREAMING_SNAKE_CASE = "from . import " + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." 
in out_line or "tensorflow_datasets" in out_line: raise ValueError(F"""Error converting {out_line.strip()}""" ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: _SCREAMING_SNAKE_CASE = True out_lines.append(__lowerCamelCase ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset _SCREAMING_SNAKE_CASE = f_name.replace(".py" , "" ) _SCREAMING_SNAKE_CASE = os.path.join(__lowerCamelCase , __lowerCamelCase ) _SCREAMING_SNAKE_CASE = os.path.join(__lowerCamelCase , __lowerCamelCase ) os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase ) self._logger.info(F"""Adding directory {output_dir}""" ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(__lowerCamelCase ) if needs_manual_update: with_manual_update.append(__lowerCamelCase ) with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f: f.writelines(__lowerCamelCase ) self._logger.info(F"""Converted in {output_file}""" ) for utils_file in utils_files: try: _SCREAMING_SNAKE_CASE = os.path.basename(__lowerCamelCase ) _SCREAMING_SNAKE_CASE = imports_to_builder_map[f_name.replace(".py" , "" )] self._logger.info(F"""Moving {dest_folder} to {utils_file}""" ) shutil.copy(__lowerCamelCase , __lowerCamelCase ) except KeyError: self._logger.error(F"""Cannot find destination folder for {utils_file}. Please copy manually.""" ) if with_manual_update: for file_path in with_manual_update: self._logger.warning( F"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" )
111
1
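The TFDS-to-datasets converter in the row above rewrites source lines with an ordered list of regex substitutions. A minimal sketch using two rules taken from its own TO_CONVERT table (order matters, since the broad `tfds\.` rule must run after the specific ones):

import re

TO_CONVERT = [
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.", r"datasets."),
]

line = "text = tfds.features.Text()"
for pattern, replacement in TO_CONVERT:
    line = re.sub(pattern, replacement, line)
assert line == "text = datasets.Value('string')"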
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class SCREAMING_SNAKE_CASE ( a__ , unittest.TestCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = ShapEPipeline _SCREAMING_SNAKE_CASE = ["prompt"] _SCREAMING_SNAKE_CASE = ["prompt"] _SCREAMING_SNAKE_CASE = [ "num_images_per_prompt", "num_inference_steps", "generator", "latents", "guidance_scale", "frame_size", "output_type", "return_dict", ] _SCREAMING_SNAKE_CASE = False @property def A ( self : int ): """simple docstring""" return 3_2 @property def A ( self : Tuple ): """simple docstring""" return 3_2 @property def A ( self : Union[str, Any] ): """simple docstring""" return self.time_input_dim * 4 @property def A ( self : List[str] ): """simple docstring""" return 8 @property def A ( self : List[Any] ): """simple docstring""" UpperCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def A ( self : Union[str, Any] ): """simple docstring""" torch.manual_seed(0 ) UpperCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) return CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE_ ) @property def A ( self : Optional[int] ): """simple docstring""" torch.manual_seed(0 ) UpperCamelCase = { 'num_attention_heads': 2, 'attention_head_dim': 1_6, 'embedding_dim': self.time_input_dim, 'num_embeddings': 3_2, 'embedding_proj_dim': self.text_embedder_hidden_size, 'time_embed_dim': self.time_embed_dim, 'num_layers': 1, 'clip_embed_dim': self.time_input_dim * 2, 'additional_embeddings': 0, 'time_embed_act_fn': 'gelu', 'norm_in_type': 'layer', 'encoder_hid_proj_type': None, 'added_emb_type': None, } UpperCamelCase = PriorTransformer(**SCREAMING_SNAKE_CASE_ ) return model @property def A ( self : Dict ): """simple docstring""" torch.manual_seed(0 ) UpperCamelCase = { 'param_shapes': ( (self.renderer_dim, 9_3), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), 'd_latent': self.time_input_dim, 'd_hidden': self.renderer_dim, 'n_output': 1_2, 'background': ( 0.1, 0.1, 0.1, ), } UpperCamelCase = ShapERenderer(**SCREAMING_SNAKE_CASE_ ) return model def A ( self : Tuple ): """simple docstring""" UpperCamelCase = self.dummy_prior UpperCamelCase = self.dummy_text_encoder UpperCamelCase = self.dummy_tokenizer UpperCamelCase = self.dummy_renderer UpperCamelCase = HeunDiscreteScheduler( beta_schedule='exp' , num_train_timesteps=1_0_2_4 , prediction_type='sample' , use_karras_sigmas=SCREAMING_SNAKE_CASE_ , clip_sample=SCREAMING_SNAKE_CASE_ , clip_sample_range=1.0 , ) UpperCamelCase = { 'prior': prior, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'renderer': renderer, 'scheduler': scheduler, } return components def A ( self : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any]=0 ): """simple docstring""" if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ): UpperCamelCase = 
torch.manual_seed(SCREAMING_SNAKE_CASE_ ) else: UpperCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ ) UpperCamelCase = { 'prompt': 'horse', 'generator': generator, 'num_inference_steps': 1, 'frame_size': 3_2, 'output_type': 'np', } return inputs def A ( self : List[Any] ): """simple docstring""" UpperCamelCase = 'cpu' UpperCamelCase = self.get_dummy_components() UpperCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase = pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) UpperCamelCase = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) ) UpperCamelCase = output.images[0] UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (2_0, 3_2, 3_2, 3) UpperCamelCase = np.array( [ 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def A ( self : Tuple ): """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def A ( self : Any ): """simple docstring""" UpperCamelCase = torch_device == 'cpu' UpperCamelCase = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=SCREAMING_SNAKE_CASE_ , relax_max_difference=SCREAMING_SNAKE_CASE_ , ) def A ( self : Optional[Any] ): """simple docstring""" UpperCamelCase = self.get_dummy_components() UpperCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase = pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) UpperCamelCase = 1 UpperCamelCase = 2 UpperCamelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) for key in inputs.keys(): if key in self.batch_params: UpperCamelCase = batch_size * [inputs[key]] UpperCamelCase = pipe(**SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def A ( self : int ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def A ( self : str ): """simple docstring""" UpperCamelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/test_shap_e_np_out.npy' ) UpperCamelCase = ShapEPipeline.from_pretrained('openai/shap-e' ) UpperCamelCase = pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) UpperCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 ) UpperCamelCase = pipe( 'a shark' , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=1_5.0 , num_inference_steps=6_4 , frame_size=6_4 , output_type='np' , ).images[0] assert images.shape == (2_0, 6_4, 6_4, 3) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
28
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class SparkDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
119
0
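A hedged usage sketch for the Spark-backed reader in the row above; it assumes a local SparkSession and that `datasets.Dataset.from_spark` routes through this reader, so it is left commented rather than runnable here:

# from pyspark.sql import SparkSession
# from datasets import Dataset
#
# spark = SparkSession.builder.master("local[*]").getOrCreate()
# df = spark.createDataFrame([("hello", 0), ("world", 1)], ["text", "label"])
# ds = Dataset.from_spark(df)  # assumed entry point; see SparkDatasetReader above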
from __future__ import annotations import unittest from transformers import DebertaVaConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, TFDebertaVaModel, ) class UpperCamelCase : def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=13 , UpperCAmelCase__=7 , UpperCAmelCase__=True , UpperCAmelCase__=True , UpperCAmelCase__=True , UpperCAmelCase__=True , UpperCAmelCase__=99 , UpperCAmelCase__=32 , UpperCAmelCase__=2 , UpperCAmelCase__=4 , UpperCAmelCase__=37 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.1 , UpperCAmelCase__=0.1 , UpperCAmelCase__=512 , UpperCAmelCase__=16 , UpperCAmelCase__=2 , UpperCAmelCase__=0.02 , UpperCAmelCase__=False , UpperCAmelCase__=True , UpperCAmelCase__="None" , UpperCAmelCase__=3 , UpperCAmelCase__=4 , UpperCAmelCase__=None , ): A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_input_mask A__ = use_token_type_ids A__ = use_labels A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_vocab_size A__ = type_sequence_label_size A__ = initializer_range A__ = num_labels A__ = num_choices A__ = relative_attention A__ = position_biased_input A__ = pos_att_type A__ = scope def __A ( self ): A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ = None if self.use_input_mask: A__ = random_attention_mask([self.batch_size, self.seq_length] ) A__ = None if self.use_token_type_ids: A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A__ = None A__ = None A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A__ = DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=UpperCAmelCase__ , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ): A__ = TFDebertaVaModel(config=UpperCAmelCase__ ) A__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} A__ = [input_ids, input_mask] A__ = model(UpperCAmelCase__ ) A__ = model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , 
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ): A__ = TFDebertaVaForMaskedLM(config=UpperCAmelCase__ ) A__ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } A__ = model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ): A__ = self.num_labels A__ = TFDebertaVaForSequenceClassification(config=UpperCAmelCase__ ) A__ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } A__ = model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ): A__ = self.num_labels A__ = TFDebertaVaForTokenClassification(config=UpperCAmelCase__ ) A__ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } A__ = model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ): A__ = TFDebertaVaForQuestionAnswering(config=UpperCAmelCase__ ) A__ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } A__ = model(UpperCAmelCase__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __A ( self ): A__ = self.prepare_config_and_inputs() ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) = config_and_inputs A__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class UpperCamelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): lowerCAmelCase : Optional[Any] = ( ( TFDebertaVaModel, TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, ) if is_tf_available() else () ) lowerCAmelCase : Union[str, Any] = ( { """feature-extraction""": TFDebertaVaModel, """fill-mask""": TFDebertaVaForMaskedLM, """question-answering""": TFDebertaVaForQuestionAnswering, """text-classification""": TFDebertaVaForSequenceClassification, """token-classification""": TFDebertaVaForTokenClassification, """zero-shot""": TFDebertaVaForSequenceClassification, } if is_tf_available() else {} ) lowerCAmelCase : Any = False lowerCAmelCase : Optional[Any] = False def __A ( self ): A__ = TFDebertaVaModelTester(self ) A__ = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 ) def __A ( self ): self.config_tester.run_common_tests() def __A ( self ): A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def __A ( self ): A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ ) def __A ( self ): A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ ) def __A ( self ): A__ = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ ) def __A ( self ): A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__ ) @slow def __A ( self ): A__ = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" ) self.assertIsNotNone(UpperCAmelCase__ ) @require_tf class UpperCamelCase ( unittest.TestCase ): @unittest.skip(reason="Model not available yet" ) def __A ( self ): pass @slow def __A ( self ): A__ = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" ) A__ = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] ) A__ = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0] A__ = tf.constant( [[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] ) tf.debugging.assert_near(output[:, 1:4, 1:4] , UpperCAmelCase__ , atol=1e-4 )
367
import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase_ : str = logging.get_logger(__name__) def UpperCamelCase ( _A : str )-> List[str]: """simple docstring""" A__ = SwinConfig.from_pretrained( "microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] ) A__ = MaskFormerConfig(backbone_config=_A ) A__ = "huggingface/label-files" if "ade20k-full" in model_name: # this should be ok A__ = 847 A__ = "maskformer-ade20k-full-id2label.json" elif "ade" in model_name: # this should be ok A__ = 150 A__ = "ade20k-id2label.json" elif "coco-stuff" in model_name: # this should be ok A__ = 171 A__ = "maskformer-coco-stuff-id2label.json" elif "coco" in model_name: # TODO A__ = 133 A__ = "coco-panoptic-id2label.json" elif "cityscapes" in model_name: # this should be ok A__ = 19 A__ = "cityscapes-id2label.json" elif "vistas" in model_name: # this should be ok A__ = 65 A__ = "mapillary-vistas-id2label.json" A__ = json.load(open(hf_hub_download(_A , _A , repo_type="dataset" ) , "r" ) ) A__ = {int(_A ): v for k, v in idalabel.items()} return config def UpperCamelCase ( _A : Tuple )-> Union[str, Any]: """simple docstring""" A__ = [] # stem # fmt: off rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") ) rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") ) rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") ) rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") ) rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") ) rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") ) rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") ) rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") ) rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") ) rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") ) rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.bias""", 
f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") ) rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") ) rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") ) rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") ) rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") ) if i < 3: rename_keys.append((f"""backbone.layers.{i}.downsample.reduction.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") ) rename_keys.append((f"""backbone.layers.{i}.downsample.norm.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") ) rename_keys.append((f"""backbone.layers.{i}.downsample.norm.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") ) rename_keys.append((f"""backbone.norm{i}.weight""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") ) rename_keys.append((f"""backbone.norm{i}.bias""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") ) # FPN rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") ) rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") ) rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") ) for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ): rename_keys.append((f"""sem_seg_head.adapter_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") ) rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") ) rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") ) rename_keys.append((f"""sem_seg_head.layer_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") ) rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") ) rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") ) rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") ) rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") ) # Transformer decoder for idx in range(config.decoder_config.decoder_layers ): # self-attention out projection rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") ) rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", 
f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") ) # cross-attention out projection rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") ) rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") ) # MLP 1 rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") ) rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") ) # MLP 2 rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") ) rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") ) # layernorm 1 (self-attention layernorm) rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") ) rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") ) # layernorm 2 (cross-attention layernorm) rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") ) rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") ) # layernorm 3 (final layernorm) rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") ) rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") ) rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") ) rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") ) # heads on top rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") ) rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") ) rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") ) rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") ) rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") ) for i in range(3 ): rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", f"""mask_embedder.{i}.0.weight""") ) rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", f"""mask_embedder.{i}.0.bias""") ) # fmt: on return rename_keys def UpperCamelCase ( _A : int , _A : List[str] , _A : List[str] )-> Any: """simple docstring""" 
A__ = dct.pop(_A ) A__ = val def UpperCamelCase ( _A : Tuple , _A : List[str] )-> Dict: """simple docstring""" A__ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): A__ = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) A__ = state_dict.pop(f"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" ) A__ = state_dict.pop(f"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict A__ = in_proj_weight[:dim, :] A__ = in_proj_bias[: dim] A__ = in_proj_weight[ dim : dim * 2, : ] A__ = in_proj_bias[ dim : dim * 2 ] A__ = in_proj_weight[ -dim :, : ] A__ = in_proj_bias[-dim :] # fmt: on def UpperCamelCase ( _A : Tuple , _A : Optional[int] )-> Union[str, Any]: """simple docstring""" A__ = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) A__ = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" ) A__ = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict A__ = in_proj_weight[: hidden_size, :] A__ = in_proj_bias[:config.hidden_size] A__ = in_proj_weight[hidden_size : hidden_size * 2, :] A__ = in_proj_bias[hidden_size : hidden_size * 2] A__ = in_proj_weight[-hidden_size :, :] A__ = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) A__ = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" ) A__ = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict A__ = in_proj_weight[: hidden_size, :] A__ = in_proj_bias[:config.hidden_size] A__ = in_proj_weight[hidden_size : hidden_size * 2, :] A__ = in_proj_bias[hidden_size : hidden_size * 2] A__ = in_proj_weight[-hidden_size :, :] A__ = in_proj_bias[-hidden_size :] # fmt: on def UpperCamelCase ( )-> torch.Tensor: """simple docstring""" A__ = "http://images.cocodataset.org/val2017/000000039769.jpg" A__ = Image.open(requests.get(_A , stream=_A ).raw ) return im @torch.no_grad() def UpperCamelCase ( _A : str , _A : str , _A : str , _A : bool = False )-> Tuple: """simple docstring""" A__ = get_maskformer_config(_A ) # load original state_dict with open(_A , "rb" ) as f: A__ = pickle.load(_A ) A__ = data["model"] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys A__ = create_rename_keys(_A ) for src, dest in rename_keys: rename_key(_A , _A , _A ) read_in_swin_q_k_v(_A , config.backbone_config ) read_in_decoder_q_k_v(_A , _A ) # update to torch tensors for key, value in state_dict.items(): A__ = torch.from_numpy(_A ) # load 🤗 model A__ = MaskFormerForInstanceSegmentation(_A ) model.eval() for name, param in model.named_parameters(): print(_A , param.shape ) A__ , A__ = model.load_state_dict(_A , strict=_A ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", 
"model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(_A ) == 0, f"""Unexpected keys: {unexpected_keys}""" # verify results A__ = prepare_img() if "vistas" in model_name: A__ = 65 elif "cityscapes" in model_name: A__ = 65535 else: A__ = 255 A__ = True if "ade" in model_name else False A__ = MaskFormerImageProcessor(ignore_index=_A , reduce_labels=_A ) A__ = image_processor(_A , return_tensors="pt" ) A__ = model(**_A ) print("Logits:" , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": A__ = torch.tensor( [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , _A , atol=1E-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: print(f"""Saving model and image processor to {pytorch_dump_folder_path}""" ) Path(_A ).mkdir(exist_ok=_A ) model.save_pretrained(_A ) image_processor.save_pretrained(_A ) if push_to_hub: print("Pushing model and image processor to the hub..." ) model.push_to_hub(f"""nielsr/{model_name}""" ) image_processor.push_to_hub(f"""nielsr/{model_name}""" ) if __name__ == "__main__": UpperCAmelCase_ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="maskformer-swin-tiny-ade", type=str, help=("Name of the MaskFormer model you'd like to convert",), ) parser.add_argument( "--checkpoint_path", default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl", type=str, help="Path to the original state dict (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) UpperCAmelCase_ : Tuple = parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
198
0
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL a : Any = logging.get_logger(__name__) class _a ( _lowerCAmelCase ): A = ['''pixel_values'''] def __init__(self, SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = PIL.Image.BICUBIC, SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = 1 / 255, SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> None: super().__init__(**SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: int = size if size is not None else {"""height""": 256, """width""": 256} UpperCAmelCase_: str = get_size_dict(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: Union[str, Any] = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} UpperCAmelCase_: Any = get_size_dict(SCREAMING_SNAKE_CASE_, param_name="""crop_size""" ) UpperCAmelCase_: Dict = do_resize UpperCAmelCase_: Tuple = size UpperCAmelCase_: Dict = resample UpperCAmelCase_: Union[str, Any] = do_center_crop UpperCAmelCase_: List[str] = crop_size UpperCAmelCase_: Optional[int] = do_rescale UpperCAmelCase_: Dict = rescale_factor UpperCAmelCase_: Optional[Any] = do_normalize UpperCAmelCase_: Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase_: Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = PIL.Image.BICUBIC, SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> np.ndarray: UpperCAmelCase_: int = get_size_dict(SCREAMING_SNAKE_CASE_ ) if "height" not in size or "width" not in size: raise ValueError(f'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}' ) return resize( SCREAMING_SNAKE_CASE_, size=(size["""height"""], size["""width"""]), resample=SCREAMING_SNAKE_CASE_, data_format=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> np.ndarray: UpperCAmelCase_: int = get_size_dict(SCREAMING_SNAKE_CASE_ ) if "height" not in size or "width" not in size: raise ValueError(f'The size dictionary must have keys \'height\' and \'width\'. 
Got {size.keys()}' ) return center_crop(SCREAMING_SNAKE_CASE_, size=(size["""height"""], size["""width"""]), data_format=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> str: return rescale(SCREAMING_SNAKE_CASE_, scale=SCREAMING_SNAKE_CASE_, data_format=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> np.ndarray: return normalize(SCREAMING_SNAKE_CASE_, mean=SCREAMING_SNAKE_CASE_, std=SCREAMING_SNAKE_CASE_, data_format=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST, **SCREAMING_SNAKE_CASE_, ) -> PIL.Image.Image: UpperCAmelCase_: str = do_resize if do_resize is not None else self.do_resize UpperCAmelCase_: str = resample if resample is not None else self.resample UpperCAmelCase_: Any = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase_: str = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase_: List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase_: str = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase_: Optional[int] = image_mean if image_mean is not None else self.image_mean UpperCAmelCase_: Optional[Any] = image_std if image_std is not None else self.image_std UpperCAmelCase_: List[str] = size if size is not None else self.size UpperCAmelCase_: Any = get_size_dict(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: Optional[Any] = crop_size if crop_size is not None else self.crop_size UpperCAmelCase_: Dict = get_size_dict(SCREAMING_SNAKE_CASE_, param_name="""crop_size""" ) UpperCAmelCase_: Any = make_list_of_images(SCREAMING_SNAKE_CASE_ ) if not valid_images(SCREAMING_SNAKE_CASE_ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. 
UpperCAmelCase_: List[str] = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images] if do_resize: UpperCAmelCase_: Any = [self.resize(image=SCREAMING_SNAKE_CASE_, size=SCREAMING_SNAKE_CASE_, resample=SCREAMING_SNAKE_CASE_ ) for image in images] if do_center_crop: UpperCAmelCase_: Dict = [self.center_crop(image=SCREAMING_SNAKE_CASE_, size=SCREAMING_SNAKE_CASE_ ) for image in images] if do_rescale: UpperCAmelCase_: str = [self.rescale(image=SCREAMING_SNAKE_CASE_, scale=SCREAMING_SNAKE_CASE_ ) for image in images] if do_normalize: UpperCAmelCase_: Optional[int] = [self.normalize(image=SCREAMING_SNAKE_CASE_, mean=SCREAMING_SNAKE_CASE_, std=SCREAMING_SNAKE_CASE_ ) for image in images] UpperCAmelCase_: Dict = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) for image in images] UpperCAmelCase_: Any = {"""pixel_values""": images} return BatchFeature(data=SCREAMING_SNAKE_CASE_, tensor_type=SCREAMING_SNAKE_CASE_ )
147
import math from typing import Callable, List, Optional, Union import numpy as np import PIL import torch from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler def lowerCAmelCase_ (lowerCAmelCase__: List[Any] , lowerCAmelCase__: List[str] , lowerCAmelCase__: Optional[Any]=[] ): """simple docstring""" UpperCAmelCase_: Union[str, Any] = size[0] - overlap_pixels * 2 UpperCAmelCase_: Dict = size[1] - overlap_pixels * 2 for letter in ["l", "r"]: if letter in remove_borders: size_x += overlap_pixels for letter in ["t", "b"]: if letter in remove_borders: size_y += overlap_pixels UpperCAmelCase_: Union[str, Any] = np.ones((size_y, size_x) , dtype=np.uinta ) * 2_5_5 UpperCAmelCase_: Optional[int] = np.pad(lowerCAmelCase__ , mode="""linear_ramp""" , pad_width=lowerCAmelCase__ , end_values=0 ) if "l" in remove_borders: UpperCAmelCase_: List[Any] = mask[:, overlap_pixels : mask.shape[1]] if "r" in remove_borders: UpperCAmelCase_: Optional[Any] = mask[:, 0 : mask.shape[1] - overlap_pixels] if "t" in remove_borders: UpperCAmelCase_: Optional[int] = mask[overlap_pixels : mask.shape[0], :] if "b" in remove_borders: UpperCAmelCase_: int = mask[0 : mask.shape[0] - overlap_pixels, :] return mask def lowerCAmelCase_ (lowerCAmelCase__: List[Any] , lowerCAmelCase__: str , lowerCAmelCase__: Union[str, Any] ): """simple docstring""" return max(lowerCAmelCase__ , min(lowerCAmelCase__ , lowerCAmelCase__ ) ) def lowerCAmelCase_ (lowerCAmelCase__: [int] , lowerCAmelCase__: [int] , lowerCAmelCase__: [int] ): """simple docstring""" return ( clamp(rect[0] , min[0] , max[0] ), clamp(rect[1] , min[1] , max[1] ), clamp(rect[2] , min[0] , max[0] ), clamp(rect[3] , min[1] , max[1] ), ) def lowerCAmelCase_ (lowerCAmelCase__: [int] , lowerCAmelCase__: int , lowerCAmelCase__: [int] ): """simple docstring""" UpperCAmelCase_: str = list(lowerCAmelCase__ ) rect[0] -= overlap rect[1] -= overlap rect[2] += overlap rect[3] += overlap UpperCAmelCase_: int = clamp_rect(lowerCAmelCase__ , [0, 0] , [image_size[0], image_size[1]] ) return rect def lowerCAmelCase_ (lowerCAmelCase__: List[Any] , lowerCAmelCase__: List[str] , lowerCAmelCase__: List[Any] , lowerCAmelCase__: int ): """simple docstring""" UpperCAmelCase_: Optional[Any] = Image.new("""RGB""" , (tile.size[0] + original_slice, tile.size[1]) ) result.paste( original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop( (slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , ) result.paste(lowerCAmelCase__ , (original_slice, 0) ) return result def lowerCAmelCase_ (lowerCAmelCase__: Dict , lowerCAmelCase__: Dict ): """simple docstring""" UpperCAmelCase_: Dict = (original_image_slice * 4, 0, tile.size[0], tile.size[1]) UpperCAmelCase_: Optional[int] = tile.crop(lowerCAmelCase__ ) return tile def lowerCAmelCase_ (lowerCAmelCase__: Tuple , lowerCAmelCase__: int ): """simple docstring""" UpperCAmelCase_: str = n % d return n - divisor class _a ( _lowerCAmelCase ): def __init__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = 350, ) -> str: super().__init__( vae=SCREAMING_SNAKE_CASE_, text_encoder=SCREAMING_SNAKE_CASE_, tokenizer=SCREAMING_SNAKE_CASE_, 
unet=SCREAMING_SNAKE_CASE_, low_res_scheduler=SCREAMING_SNAKE_CASE_, scheduler=SCREAMING_SNAKE_CASE_, max_noise_level=SCREAMING_SNAKE_CASE_, ) def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> Dict: torch.manual_seed(0 ) UpperCAmelCase_: Dict = ( min(image.size[0] - (tile_size + original_image_slice), x * tile_size ), min(image.size[1] - (tile_size + original_image_slice), y * tile_size ), min(image.size[0], (x + 1) * tile_size ), min(image.size[1], (y + 1) * tile_size ), ) UpperCAmelCase_: Tuple = add_overlap_rect(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, image.size ) UpperCAmelCase_: List[str] = image.crop(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: Optional[Any] = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0] UpperCAmelCase_: List[Any] = translated_slice_x - (original_image_slice / 2) UpperCAmelCase_: str = max(0, SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: Any = squeeze_tile(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: Any = to_input.size UpperCAmelCase_: Any = to_input.resize((tile_size, tile_size), Image.BICUBIC ) UpperCAmelCase_: str = super(SCREAMING_SNAKE_CASE_, self ).__call__(image=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ).images[0] UpperCAmelCase_: Optional[int] = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC ) UpperCAmelCase_: int = unsqueeze_tile(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: Optional[Any] = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC ) UpperCAmelCase_: Union[str, Any] = [] if x == 0: remove_borders.append("""l""" ) elif crop_rect[2] == image.size[0]: remove_borders.append("""r""" ) if y == 0: remove_borders.append("""t""" ) elif crop_rect[3] == image.size[1]: remove_borders.append("""b""" ) UpperCAmelCase_: Tuple = Image.fromarray( make_transparency_mask( (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=SCREAMING_SNAKE_CASE_ ), mode="""L""", ) final_image.paste( SCREAMING_SNAKE_CASE_, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), SCREAMING_SNAKE_CASE_ ) @torch.no_grad() def __call__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = 75, SCREAMING_SNAKE_CASE_ = 9.0, SCREAMING_SNAKE_CASE_ = 50, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = 1, SCREAMING_SNAKE_CASE_ = 0.0, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = 1, SCREAMING_SNAKE_CASE_ = 128, SCREAMING_SNAKE_CASE_ = 32, SCREAMING_SNAKE_CASE_ = 32, ) -> Dict: UpperCAmelCase_: int = Image.new("""RGB""", (image.size[0] * 4, image.size[1] * 4) ) UpperCAmelCase_: str = math.ceil(image.size[0] / tile_size ) UpperCAmelCase_: int = math.ceil(image.size[1] / tile_size ) UpperCAmelCase_: Dict = tcx * tcy UpperCAmelCase_: Optional[Any] = 0 for y in range(SCREAMING_SNAKE_CASE_ ): for x in range(SCREAMING_SNAKE_CASE_ ): self._process_tile( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, prompt=SCREAMING_SNAKE_CASE_, num_inference_steps=SCREAMING_SNAKE_CASE_, guidance_scale=SCREAMING_SNAKE_CASE_, noise_level=SCREAMING_SNAKE_CASE_, negative_prompt=SCREAMING_SNAKE_CASE_, 
num_images_per_prompt=SCREAMING_SNAKE_CASE_, eta=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, latents=SCREAMING_SNAKE_CASE_, ) current_count += 1 if callback is not None: callback({"""progress""": current_count / total_tile_count, """image""": final_image} ) return final_image def lowerCAmelCase_ (): """simple docstring""" UpperCAmelCase_: Tuple = """stabilityai/stable-diffusion-x4-upscaler""" UpperCAmelCase_: Union[str, Any] = StableDiffusionTiledUpscalePipeline.from_pretrained(lowerCAmelCase__ , revision="""fp16""" , torch_dtype=torch.floataa ) UpperCAmelCase_: str = pipe.to("""cuda""" ) UpperCAmelCase_: List[str] = Image.open("""../../docs/source/imgs/diffusers_library.jpg""" ) def callback(lowerCAmelCase__: Dict ): print(F'progress: {obj["progress"]:.4f}' ) obj["image"].save("""diffusers_library_progress.jpg""" ) UpperCAmelCase_: Optional[int] = pipe(image=lowerCAmelCase__ , prompt="""Black font, white background, vector""" , noise_level=4_0 , callback=lowerCAmelCase__ ) final_image.save("""diffusers_library.jpg""" ) if __name__ == "__main__": main()
147
1
def one_pence() -> int:
    """Base case: there is exactly one way to make any non-negative amount out of 1p coins."""
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    """Count the ways to make `x` pence using UK coins from 1p up to £2 (Project Euler 31)."""
    return two_pound(x)


if __name__ == "__main__":
    print(solution(int(input().strip())))
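A quick sanity check for the coin-counting recursion above; the expected values are the standard counts for 5p and for £2 (the Project Euler 31 answer):

# Minimal usage sketch for the coin-partition recursion above.
assert solution(5) == 4        # 5 = 5 = 2+2+1 = 2+1+1+1 = 1+1+1+1+1
assert solution(200) == 73682  # known number of ways to make £2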
125
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(n: int) -> int:
    """Return the Mobius function mu(n): 0 if n has a squared prime factor,
    otherwise (-1) raised to the number of prime factors."""
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
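A short usage sketch for the Möbius function above, checked against the standard table of values:

# mu(6) -> 1 (6 = 2*3, two prime factors), mu(30) -> -1 (three), mu(12) -> 0 (2^2 divides 12).
print(mobius(6), mobius(30), mobius(12))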
125
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
    from .configuration_maskformer_swin import MaskFormerSwinConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_maskformer import MaskFormerFeatureExtractor
        from .image_processing_maskformer import MaskFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_maskformer import (
            MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaskFormerForInstanceSegmentation,
            MaskFormerModel,
            MaskFormerPreTrainedModel,
        )
        from .modeling_maskformer_swin import (
            MaskFormerSwinBackbone,
            MaskFormerSwinModel,
            MaskFormerSwinPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
200
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class ScoreSdeVePipeline(DiffusionPipeline):
    """Pipeline for unconditional image generation with the score-based SDE-VE sampler."""

    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
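A brief usage sketch for the SDE-VE pipeline above; the model id is an assumption ("google/ncsnpp-church-256" is one public NCSN++ checkpoint), and the small step count is only to keep the sketch fast:

# Usage sketch (assumes the diffusers package and the public NCSN++ checkpoint below).
from diffusers import ScoreSdeVePipeline

pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
image = pipe(num_inference_steps=10).images[0]  # use ~2000 steps for real samples
image.save("sde_ve_sample.png")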
200
1
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    """Build a list of Things from parallel name/value/weight lists."""
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    """Greedily pick items in decreasing key_func order until max_cost is exhausted."""
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
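A small usage sketch for the greedy knapsack helper above; the menu numbers are illustrative:

# Pick food items by value-to-weight density under a 15-unit weight budget (illustrative data).
foods = ["burger", "salad", "pizza"]
values = [80, 30, 100]
weights = [9, 3, 10]

menu = build_menu(foods, values, weights)
chosen, total = greedy(menu, 15, Things.value_weight)
print(chosen, total)  # [salad, pizza] with total value 130.0: burger no longer fits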
366
import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class __A ( unittest.TestCase ): """simple docstring""" def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Optional[Any] =[ 'safety_checker/pytorch_model.bin', 'safety_checker/model.safetensors', 'vae/diffusion_pytorch_model.bin', 'vae/diffusion_pytorch_model.safetensors', 'text_encoder/pytorch_model.bin', 'text_encoder/model.safetensors', 'unet/diffusion_pytorch_model.bin', 'unet/diffusion_pytorch_model.safetensors', ] self.assertTrue(is_safetensors_compatible(lowerCamelCase__ ) ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Any =[ 'unet/diffusion_pytorch_model.bin', 'unet/diffusion_pytorch_model.safetensors', ] self.assertTrue(is_safetensors_compatible(lowerCamelCase__ ) ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : List[str] =[ 'safety_checker/pytorch_model.bin', 'safety_checker/model.safetensors', 'vae/diffusion_pytorch_model.bin', 'vae/diffusion_pytorch_model.safetensors', 'text_encoder/pytorch_model.bin', 'text_encoder/model.safetensors', 'unet/diffusion_pytorch_model.bin', # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(lowerCamelCase__ ) ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : List[str] =[ 'text_encoder/pytorch_model.bin', 'text_encoder/model.safetensors', ] self.assertTrue(is_safetensors_compatible(lowerCamelCase__ ) ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Any =[ 'safety_checker/pytorch_model.bin', 'safety_checker/model.safetensors', 'vae/diffusion_pytorch_model.bin', 'vae/diffusion_pytorch_model.safetensors', 'text_encoder/pytorch_model.bin', # Removed: 'text_encoder/model.safetensors', 'unet/diffusion_pytorch_model.bin', 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(lowerCamelCase__ ) ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Tuple =[ 'safety_checker/pytorch_model.fp16.bin', 'safety_checker/model.fp16.safetensors', 'vae/diffusion_pytorch_model.fp16.bin', 'vae/diffusion_pytorch_model.fp16.safetensors', 'text_encoder/pytorch_model.fp16.bin', 'text_encoder/model.fp16.safetensors', 'unet/diffusion_pytorch_model.fp16.bin', 'unet/diffusion_pytorch_model.fp16.safetensors', ] __UpperCamelCase : List[Any] ='fp16' self.assertTrue(is_safetensors_compatible(lowerCamelCase__ , variant=lowerCamelCase__ ) ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : str =[ 'unet/diffusion_pytorch_model.fp16.bin', 'unet/diffusion_pytorch_model.fp16.safetensors', ] __UpperCamelCase : str ='fp16' self.assertTrue(is_safetensors_compatible(lowerCamelCase__ , variant=lowerCamelCase__ ) ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Optional[int] =[ 'unet/diffusion_pytorch_model.bin', 'unet/diffusion_pytorch_model.safetensors', ] __UpperCamelCase : int ='fp16' self.assertTrue(is_safetensors_compatible(lowerCamelCase__ , variant=lowerCamelCase__ ) ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Any =[ 'safety_checker/pytorch_model.fp16.bin', 'safety_checker/model.fp16.safetensors', 'vae/diffusion_pytorch_model.fp16.bin', 'vae/diffusion_pytorch_model.fp16.safetensors', 'text_encoder/pytorch_model.fp16.bin', 'text_encoder/model.fp16.safetensors', 'unet/diffusion_pytorch_model.fp16.bin', # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] __UpperCamelCase : str ='fp16' 
self.assertFalse(is_safetensors_compatible(lowerCamelCase__ , variant=lowerCamelCase__ ) ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : List[str] =[ 'text_encoder/pytorch_model.fp16.bin', 'text_encoder/model.fp16.safetensors', ] __UpperCamelCase : Any ='fp16' self.assertTrue(is_safetensors_compatible(lowerCamelCase__ , variant=lowerCamelCase__ ) ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : List[Any] =[ 'text_encoder/pytorch_model.bin', 'text_encoder/model.safetensors', ] __UpperCamelCase : Tuple ='fp16' self.assertTrue(is_safetensors_compatible(lowerCamelCase__ , variant=lowerCamelCase__ ) ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Any =[ 'safety_checker/pytorch_model.fp16.bin', 'safety_checker/model.fp16.safetensors', 'vae/diffusion_pytorch_model.fp16.bin', 'vae/diffusion_pytorch_model.fp16.safetensors', 'text_encoder/pytorch_model.fp16.bin', # 'text_encoder/model.fp16.safetensors', 'unet/diffusion_pytorch_model.fp16.bin', 'unet/diffusion_pytorch_model.fp16.safetensors', ] __UpperCamelCase : Any ='fp16' self.assertFalse(is_safetensors_compatible(lowerCamelCase__ , variant=lowerCamelCase__ ) )
245
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase : str = logging.get_logger(__name__) _lowercase : List[str] = { 'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json', 'YituTech/conv-bert-medium-small': ( 'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json' ), 'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json', # See all ConvBERT models at https://huggingface.co/models?filter=convbert } class _UpperCAmelCase ( lowercase__ ): a__ : Optional[int] = '''convbert''' def __init__( self : List[Any] , _lowercase : Optional[int]=3_05_22 , _lowercase : Dict=7_68 , _lowercase : Optional[int]=12 , _lowercase : Union[str, Any]=12 , _lowercase : str=30_72 , _lowercase : Dict="gelu" , _lowercase : Dict=0.1 , _lowercase : Tuple=0.1 , _lowercase : List[str]=5_12 , _lowercase : Optional[Any]=2 , _lowercase : List[Any]=0.02 , _lowercase : Any=1E-12 , _lowercase : int=1 , _lowercase : int=0 , _lowercase : Optional[int]=2 , _lowercase : Optional[int]=7_68 , _lowercase : Union[str, Any]=2 , _lowercase : List[Any]=9 , _lowercase : List[Any]=1 , _lowercase : Dict=None , **_lowercase : List[Any] , ): super().__init__( pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) __UpperCAmelCase = vocab_size __UpperCAmelCase = hidden_size __UpperCAmelCase = num_hidden_layers __UpperCAmelCase = num_attention_heads __UpperCAmelCase = intermediate_size __UpperCAmelCase = hidden_act __UpperCAmelCase = hidden_dropout_prob __UpperCAmelCase = attention_probs_dropout_prob __UpperCAmelCase = max_position_embeddings __UpperCAmelCase = type_vocab_size __UpperCAmelCase = initializer_range __UpperCAmelCase = layer_norm_eps __UpperCAmelCase = embedding_size __UpperCAmelCase = head_ratio __UpperCAmelCase = conv_kernel_size __UpperCAmelCase = num_groups __UpperCAmelCase = classifier_dropout class _UpperCAmelCase ( lowercase__ ): @property def a ( self : List[str] ): if self.task == "multiple-choice": __UpperCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'} else: __UpperCAmelCase = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
332
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ : Dict = logging.get_logger(__name__) UpperCAmelCase_ : Union[str, Any] = { 'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json', } class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : List[str] = '''switch_transformers''' snake_case__ : Optional[int] = ['''past_key_values'''] snake_case__ : Optional[Any] = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[int]=3_2_1_2_8 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=7_6_8 , SCREAMING_SNAKE_CASE__ : Optional[Any]=6_4 , SCREAMING_SNAKE_CASE__ : List[str]=2_0_4_8 , SCREAMING_SNAKE_CASE__ : Dict=6_4 , SCREAMING_SNAKE_CASE__ : List[Any]=1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE__ : str=3 , SCREAMING_SNAKE_CASE__ : Tuple=1_2 , SCREAMING_SNAKE_CASE__ : Tuple=8 , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.01 , SCREAMING_SNAKE_CASE__ : str="float32" , SCREAMING_SNAKE_CASE__ : str=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3_2 , SCREAMING_SNAKE_CASE__ : Dict=1_2_8 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Dict=1E-6 , SCREAMING_SNAKE_CASE__ : Dict=0.001 , SCREAMING_SNAKE_CASE__ : Any=0.001 , SCREAMING_SNAKE_CASE__ : Optional[int]=1.0 , SCREAMING_SNAKE_CASE__ : Any="relu" , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : Optional[int]=1 , **SCREAMING_SNAKE_CASE__ : Dict , ) -> Optional[Any]: a_ : Optional[int] = vocab_size a_ : List[str] = d_model a_ : Tuple = d_kv a_ : Optional[Any] = d_ff a_ : List[Any] = num_sparse_encoder_layers a_ : Any = num_layers a_ : str = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry a_ : List[Any] = num_sparse_decoder_layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_encoder_layers > 0: a_ : Optional[int] = self.num_layers // self.num_sparse_encoder_layers else: a_ : List[Any] = self.num_layers # HACK: this will create 0 sparse layers # This tells us, each how many encoder layer we'll have to set a sparse layer. 
if self.num_sparse_decoder_layers > 0: a_ : Union[str, Any] = self.num_decoder_layers // self.num_sparse_decoder_layers else: a_ : List[str] = self.num_decoder_layers # HACK: this will create 0 sparse layers a_ : Dict = num_heads a_ : str = num_experts a_ : Any = expert_capacity a_ : List[Any] = router_bias a_ : str = router_jitter_noise if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(F"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" ) a_ : Optional[int] = router_dtype a_ : int = router_ignore_padding_tokens a_ : Any = relative_attention_num_buckets a_ : List[str] = relative_attention_max_distance a_ : Optional[Any] = dropout_rate a_ : Tuple = layer_norm_epsilon a_ : Dict = initializer_factor a_ : Any = feed_forward_proj a_ : Tuple = use_cache a_ : str = add_router_probs a_ : Optional[int] = router_z_loss_coef a_ : List[str] = router_aux_loss_coef a_ : int = self.feed_forward_proj.split('-' ) a_ : int = act_info[-1] a_ : Optional[int] = act_info[0] == 'gated' if len(SCREAMING_SNAKE_CASE__ ) > 1 and act_info[0] != "gated" or len(SCREAMING_SNAKE_CASE__ ) > 2: raise ValueError( F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" 'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ' '\'gated-gelu\' or \'relu\'' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": a_ : Any = 'gelu_new' super().__init__( pad_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
32
0
import argparse

import torch

from transformers import GPT2LMHeadModel, RobertaForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head ###s
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[layer] = state_dict[layer]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
117
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_download, hf_hub_url from PIL import Image from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() _A = logging.get_logger(__name__) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] ): __UpperCamelCase =SwinConfig( embed_dim=1_92 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['stage2', 'stage3', 'stage4'] , ) __UpperCamelCase =DetaConfig( backbone_config=SCREAMING_SNAKE_CASE__ , num_queries=9_00 , encoder_ffn_dim=20_48 , decoder_ffn_dim=20_48 , num_feature_levels=5 , assign_first_stage=SCREAMING_SNAKE_CASE__ , with_box_refine=SCREAMING_SNAKE_CASE__ , two_stage=SCREAMING_SNAKE_CASE__ , ) # set labels __UpperCamelCase ='huggingface/label-files' if "o365" in model_name: __UpperCamelCase =3_66 __UpperCamelCase ='object365-id2label.json' else: __UpperCamelCase =91 __UpperCamelCase ='coco-detection-id2label.json' __UpperCamelCase =num_labels __UpperCamelCase =json.load(open(cached_download(hf_hub_url(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) ) , 'r' ) ) __UpperCamelCase ={int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()} __UpperCamelCase =idalabel __UpperCamelCase ={v: k for k, v in idalabel.items()} return config def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] ): __UpperCamelCase =[] # stem # fmt: off rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') ) rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') ) rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') ) rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') ) rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') ) rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') ) rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') ) rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') ) rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') ) rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') ) rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') ) 
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') ) rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') ) rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') ) rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') ) if i < 3: rename_keys.append((F'backbone.0.body.layers.{i}.downsample.reduction.weight', F'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') ) rename_keys.append((F'backbone.0.body.layers.{i}.downsample.norm.weight', F'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') ) rename_keys.append((F'backbone.0.body.layers.{i}.downsample.norm.bias', F'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') ) rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') ) rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') ) rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') ) rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') ) rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') ) rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') ) # transformer encoder for i in range(config.encoder_layers ): rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', F'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', F'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', F'model.encoder.layers.{i}.self_attn.attention_weights.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', F'model.encoder.layers.{i}.self_attn.attention_weights.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.value_proj.weight', F'model.encoder.layers.{i}.self_attn.value_proj.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.value_proj.bias', F'model.encoder.layers.{i}.self_attn.value_proj.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.output_proj.weight', F'model.encoder.layers.{i}.self_attn.output_proj.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.output_proj.bias', F'model.encoder.layers.{i}.self_attn.output_proj.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.norm1.weight', F'model.encoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'model.encoder.layers.{i}.self_attn_layer_norm.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'model.encoder.layers.{i}.fc1.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'model.encoder.layers.{i}.fc1.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'model.encoder.layers.{i}.fc2.weight') ) 
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'model.encoder.layers.{i}.fc2.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'model.encoder.layers.{i}.final_layer_norm.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'model.encoder.layers.{i}.final_layer_norm.bias') ) # transformer decoder for i in range(config.decoder_layers ): rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', F'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', F'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', F'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', F'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', F'model.decoder.layers.{i}.encoder_attn.value_proj.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', F'model.decoder.layers.{i}.encoder_attn.value_proj.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', F'model.decoder.layers.{i}.encoder_attn.output_proj.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', F'model.decoder.layers.{i}.encoder_attn.output_proj.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm1.weight', F'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'model.decoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'model.decoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm2.weight', F'model.decoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm2.bias', F'model.decoder.layers.{i}.self_attn_layer_norm.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'model.decoder.layers.{i}.fc1.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'model.decoder.layers.{i}.fc1.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'model.decoder.layers.{i}.fc2.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'model.decoder.layers.{i}.fc2.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'model.decoder.layers.{i}.final_layer_norm.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'model.decoder.layers.{i}.final_layer_norm.bias') ) # fmt: on return rename_keys def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] ): __UpperCamelCase =dct.pop(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =val def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str ): __UpperCamelCase =[int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): 
__UpperCamelCase =num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) __UpperCamelCase =state_dict.pop(F'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight' ) __UpperCamelCase =state_dict.pop(F'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict __UpperCamelCase =in_proj_weight[:dim, :] __UpperCamelCase =in_proj_bias[: dim] __UpperCamelCase =in_proj_weight[ dim : dim * 2, : ] __UpperCamelCase =in_proj_bias[ dim : dim * 2 ] __UpperCamelCase =in_proj_weight[ -dim :, : ] __UpperCamelCase =in_proj_bias[-dim :] # fmt: on def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] ): # transformer decoder self-attention layers __UpperCamelCase =config.d_model for i in range(config.decoder_layers ): # read in weights + bias of input projection layer of self-attention __UpperCamelCase =state_dict.pop(F'transformer.decoder.layers.{i}.self_attn.in_proj_weight' ) __UpperCamelCase =state_dict.pop(F'transformer.decoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict __UpperCamelCase =in_proj_weight[:hidden_size, :] __UpperCamelCase =in_proj_bias[:hidden_size] __UpperCamelCase =in_proj_weight[ hidden_size : hidden_size * 2, : ] __UpperCamelCase =in_proj_bias[hidden_size : hidden_size * 2] __UpperCamelCase =in_proj_weight[-hidden_size:, :] __UpperCamelCase =in_proj_bias[-hidden_size:] def _UpperCAmelCase ( ): __UpperCamelCase ='http://images.cocodataset.org/val2017/000000039769.jpg' __UpperCamelCase =Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw ) return im @torch.no_grad() def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple ): __UpperCamelCase =get_deta_config(SCREAMING_SNAKE_CASE__ ) # load original state dict if model_name == "deta-swin-large": __UpperCamelCase =hf_hub_download(repo_id='nielsr/deta-checkpoints' , filename='adet_swin_ft.pth' ) elif model_name == "deta-swin-large-o365": __UpperCamelCase =hf_hub_download(repo_id='jozhang97/deta-swin-l-o365' , filename='deta_swin_pt_o365.pth' ) else: raise ValueError(F'Model name {model_name} not supported' ) __UpperCamelCase =torch.load(SCREAMING_SNAKE_CASE__ , map_location='cpu' )['model'] # original state dict for name, param in state_dict.items(): print(SCREAMING_SNAKE_CASE__ , param.shape ) # rename keys __UpperCamelCase =create_rename_keys(SCREAMING_SNAKE_CASE__ ) for src, dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) read_in_swin_q_k_v(SCREAMING_SNAKE_CASE__ , config.backbone_config ) read_in_decoder_q_k_v(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # fix some prefixes for key in state_dict.copy().keys(): if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key: __UpperCamelCase =state_dict.pop(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =val if "input_proj" in key: __UpperCamelCase =state_dict.pop(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =val if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key: __UpperCamelCase =state_dict.pop(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =val # finally, create HuggingFace model and load state dict __UpperCamelCase =DetaForObjectDetection(SCREAMING_SNAKE_CASE__ ) 
model.load_state_dict(SCREAMING_SNAKE_CASE__ ) model.eval() __UpperCamelCase ='cuda' if torch.cuda.is_available() else 'cpu' model.to(SCREAMING_SNAKE_CASE__ ) # load image processor __UpperCamelCase =DetaImageProcessor(format='coco_detection' ) # verify our conversion on image __UpperCamelCase =prepare_img() __UpperCamelCase =processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='pt' ) __UpperCamelCase =encoding['pixel_values'] __UpperCamelCase =model(pixel_values.to(SCREAMING_SNAKE_CASE__ ) ) # verify logits print('Logits:' , outputs.logits[0, :3, :3] ) print('Boxes:' , outputs.pred_boxes[0, :3, :3] ) if model_name == "deta-swin-large": __UpperCamelCase =torch.tensor( [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] ) __UpperCamelCase =torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] ) elif model_name == "deta-swin-large-o365": __UpperCamelCase =torch.tensor( [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] ) __UpperCamelCase =torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] ) assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(SCREAMING_SNAKE_CASE__ ) , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(SCREAMING_SNAKE_CASE__ ) , atol=1E-4 ) print('Everything ok!' ) if pytorch_dump_folder_path: # Save model and processor logger.info(F'Saving PyTorch model and processor to {pytorch_dump_folder_path}...' ) Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ ) model.save_pretrained(SCREAMING_SNAKE_CASE__ ) processor.save_pretrained(SCREAMING_SNAKE_CASE__ ) # Push to hub if push_to_hub: print('Pushing model and processor to hub...' ) model.push_to_hub(F'jozhang97/{model_name}' ) processor.push_to_hub(F'jozhang97/{model_name}' ) if __name__ == "__main__": _A = argparse.ArgumentParser() parser.add_argument( '--model_name', type=str, default='deta-swin-large', choices=['deta-swin-large', 'deta-swin-large-o365'], help='Name of the model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.', ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) _A = parser.parse_args() convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
117
1
import flax.linen as nn
import jax
import jax.numpy as jnp


class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)

    def __call__(self, hidden_states):
        # double the spatial resolution with nearest-neighbour resize, then refine with a 3x3 conv
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # stride-2 conv halves the spatial resolution
        self.conv = nn.Conv(self.out_channels, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype)

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            # 1x1 conv so the residual branch matches the new channel count
            self.conv_shortcut = nn.Conv(out_channels, kernel_size=(1, 1), strides=(1, 1), padding="VALID", dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        # inject the time embedding as a per-channel bias
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
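A minimal shape-check sketch for the Flax ResNet block above; the dimensions are illustrative:

# NHWC input plus a time embedding; the 1x1 shortcut kicks in because 32 != 64.
rng = jax.random.PRNGKey(0)
block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
x = jnp.zeros((1, 16, 16, 32))
temb = jnp.zeros((1, 128))
params = block.init(rng, x, temb)
out = block.apply(params, x, temb)
print(out.shape)  # (1, 16, 16, 64)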
24
"""simple docstring""" import argparse import torch from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert from transformers.utils import logging logging.set_verbosity_info() def snake_case_ ( A_ : Tuple, A_ : int, A_ : Dict ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = LxmertConfig.from_json_file(A_ ) print(F'''Building PyTorch model from configuration: {config}''' ) _lowerCamelCase : List[str] = LxmertForPreTraining(A_ ) # Load weights from tf checkpoint load_tf_weights_in_lxmert(A_, A_, A_ ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict(), A_ ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowerCAmelCase__ = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
72
0
'''simple docstring'''

import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class SCREAMING_SNAKE_CASE (a__ , a__ ):
    @register_to_config
    def __init__( self , *, _UpperCAmelCase = 4 , _UpperCAmelCase = 768 , _UpperCAmelCase , _UpperCAmelCase , ):
        '''simple docstring'''
        super().__init__()
        __A : Tuple = nn.Parameter(torch.zeros(_UpperCAmelCase))

        # parameters for additional clip time embeddings
        __A : int = nn.Linear(_UpperCAmelCase , _UpperCAmelCase)
        __A : List[str] = nn.Linear(_UpperCAmelCase , _UpperCAmelCase)

        # parameters for encoder hidden states
        __A : str = clip_extra_context_tokens
        __A : Optional[int] = nn.Linear(
            _UpperCAmelCase , self.clip_extra_context_tokens * cross_attention_dim)
        __A : Union[str, Any] = nn.Linear(_UpperCAmelCase , _UpperCAmelCase)
        __A : Tuple = nn.LayerNorm(_UpperCAmelCase)

    def SCREAMING_SNAKE_CASE ( self , *, _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
        '''simple docstring'''
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            __A : str = image_embeddings.shape[0]
            __A : Union[str, Any] = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            __A : Optional[Any] = classifier_free_guidance_embeddings.expand(
                _UpperCAmelCase , -1)
            __A : Any = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        __A : int = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        __A : int = self.embedding_proj(_UpperCAmelCase)
        __A : Union[str, Any] = self.clip_image_embeddings_project_to_time_embeddings(_UpperCAmelCase)
        __A : Any = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        __A : Tuple = self.clip_extra_context_tokens_proj(_UpperCAmelCase)
        __A : List[Any] = clip_extra_context_tokens.reshape(_UpperCAmelCase , -1 , self.clip_extra_context_tokens)
        __A : List[str] = clip_extra_context_tokens.permute(0 , 2 , 1)

        __A : int = self.encoder_hidden_states_proj(_UpperCAmelCase)
        __A : Union[str, Any] = self.text_encoder_hidden_states_norm(_UpperCAmelCase)
        __A : int = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
190
'''simple docstring'''

import math


def _lowerCAmelCase ( __snake_case : int ) -> int:
    if not isinstance(__snake_case , __snake_case ):
        __A : List[Any] = f'Input value of [number={number}] must be an integer'
        raise TypeError(__snake_case )

    if number < 1:
        __A : Union[str, Any] = f'Input value of [number={number}] must be > 0'
        raise ValueError(__snake_case )
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        __A : Optional[Any] = int(math.log(number // 3 , 2 ) ) + 2

        __A : Union[str, Any] = [3, 5]
        __A : List[Any] = 2
        __A : Optional[Any] = 3
        for block in range(1 , __snake_case ):
            for _ in range(__snake_case ):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
                proth_index += 1
            increment *= 2

    return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        lowercase__ : str = 0
        try:
            lowercase__ : List[str] = proth(number)
        except ValueError:
            print(f"""ValueError: there is no {number}th Proth number""")
            continue

        print(f"""The {number}th Proth number: {value}""")
190
1
'''simple docstring'''

from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def UpperCAmelCase__ ( ) -> Any:
    __lowerCamelCase : List[Any] = 9, 14  # noqa: F841

    __lowerCamelCase : int = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    __lowerCamelCase : Union[str, Any] = defaultdict(__UpperCamelCase )
    for nodea, nodea, cost in edges:
        adjancency[nodea].append([nodea, cost] )
        adjancency[nodea].append([nodea, cost] )

    __lowerCamelCase : str = mst(__UpperCamelCase )

    __lowerCamelCase : Union[str, Any] = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        __lowerCamelCase : Tuple = tuple(answer[:2] )
        __lowerCamelCase : Dict = tuple(edge[::-1] )
        assert edge in result or reverse in result
185
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification


def UpperCamelCase( __UpperCamelCase : List[str] ):
    lowerCAmelCase_ : List[str] = SwinvaConfig()
    lowerCAmelCase_ : List[str] = swinva_name.split('''_''' )
    lowerCAmelCase_ : str = name_split[1]
    if "to" in name_split[3]:
        lowerCAmelCase_ : List[Any] = int(name_split[3][-3:] )
    else:
        lowerCAmelCase_ : List[Any] = int(name_split[3] )
    if "to" in name_split[2]:
        lowerCAmelCase_ : List[str] = int(name_split[2][-2:] )
    else:
        lowerCAmelCase_ : int = int(name_split[2][6:] )

    if model_size == "tiny":
        lowerCAmelCase_ : Any = 96
        lowerCAmelCase_ : List[str] = (2, 2, 6, 2)
        lowerCAmelCase_ : Union[str, Any] = (3, 6, 12, 24)
    elif model_size == "small":
        lowerCAmelCase_ : List[str] = 96
        lowerCAmelCase_ : Any = (2, 2, 18, 2)
        lowerCAmelCase_ : Dict = (3, 6, 12, 24)
    elif model_size == "base":
        lowerCAmelCase_ : Union[str, Any] = 128
        lowerCAmelCase_ : List[Any] = (2, 2, 18, 2)
        lowerCAmelCase_ : Tuple = (4, 8, 16, 32)
    else:
        lowerCAmelCase_ : Optional[Any] = 192
        lowerCAmelCase_ : List[Any] = (2, 2, 18, 2)
        lowerCAmelCase_ : List[Any] = (6, 12, 24, 48)

    if "to" in swinva_name:
        lowerCAmelCase_ : Union[str, Any] = (12, 12, 12, 6)

    if ("22k" in swinva_name) and ("to" not in swinva_name):
        lowerCAmelCase_ : Optional[int] = 21841
        lowerCAmelCase_ : Any = '''huggingface/label-files'''
        lowerCAmelCase_ : Tuple = '''imagenet-22k-id2label.json'''
        lowerCAmelCase_ : Any = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type='''dataset''' ) ,'''r''' ) )
        lowerCAmelCase_ : Optional[Any] = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
        lowerCAmelCase_ : str = idalabel
        lowerCAmelCase_ : List[str] = {v: k for k, v in idalabel.items()}
    else:
        lowerCAmelCase_ : Optional[int] = 1000
        lowerCAmelCase_ : Tuple = '''huggingface/label-files'''
        lowerCAmelCase_ : Union[str, Any] = '''imagenet-1k-id2label.json'''
        lowerCAmelCase_ : Dict = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type='''dataset''' ) ,'''r''' ) )
        lowerCAmelCase_ : int = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
        lowerCAmelCase_ : List[str] = idalabel
        lowerCAmelCase_ : Optional[int] = {v: k for k, v in idalabel.items()}

    lowerCAmelCase_ : Optional[int] = img_size
    lowerCAmelCase_ : Dict = num_classes
    lowerCAmelCase_ : Dict = embed_dim
    lowerCAmelCase_ : Optional[Any] = depths
    lowerCAmelCase_ : Optional[int] = num_heads
    lowerCAmelCase_ : Dict = window_size

    return config


def UpperCamelCase( __UpperCamelCase : List[str] ):
    if "patch_embed.proj" in name:
        lowerCAmelCase_ : Dict = name.replace('''patch_embed.proj''' ,'''embeddings.patch_embeddings.projection''' )
    if "patch_embed.norm" in name:
        lowerCAmelCase_ : List[Any] = name.replace('''patch_embed.norm''' ,'''embeddings.norm''' )
    if "layers" in name:
        lowerCAmelCase_ : int = '''encoder.''' + name
    if "attn.proj" in name:
        lowerCAmelCase_ : Union[str, Any] = name.replace('''attn.proj''' ,'''attention.output.dense''' )
    if "attn" in name:
        lowerCAmelCase_ : Optional[Any] = name.replace('''attn''' ,'''attention.self''' )
    if "norm1" in name:
        lowerCAmelCase_ : Union[str, Any] = name.replace('''norm1''' ,'''layernorm_before''' )
    if "norm2" in name:
        lowerCAmelCase_ : Tuple = name.replace('''norm2''' ,'''layernorm_after''' )
    if "mlp.fc1" in name:
        lowerCAmelCase_ : Optional[Any] = name.replace('''mlp.fc1''' ,'''intermediate.dense''' )
    if "mlp.fc2" in name:
        lowerCAmelCase_ : Tuple = name.replace('''mlp.fc2''' ,'''output.dense''' )
    if "q_bias" in name:
        lowerCAmelCase_ : Tuple = name.replace('''q_bias''' ,'''query.bias''' )
    if "k_bias" in name:
        lowerCAmelCase_ : Tuple = name.replace('''k_bias''' ,'''key.bias''' )
    if "v_bias" in name:
        lowerCAmelCase_ : int = name.replace('''v_bias''' ,'''value.bias''' )
    if "cpb_mlp" in name:
        lowerCAmelCase_ : Any = name.replace('''cpb_mlp''' ,'''continuous_position_bias_mlp''' )
    if name == "norm.weight":
        lowerCAmelCase_ : Dict = '''layernorm.weight'''
    if name == "norm.bias":
        lowerCAmelCase_ : Any = '''layernorm.bias'''

    if "head" in name:
        lowerCAmelCase_ : int = name.replace('''head''' ,'''classifier''' )
    else:
        lowerCAmelCase_ : Union[str, Any] = '''swinv2.''' + name

    return name


def UpperCamelCase( __UpperCamelCase : Any ,__UpperCamelCase : Optional[int] ):
    for key in orig_state_dict.copy().keys():
        lowerCAmelCase_ : Optional[int] = orig_state_dict.pop(__UpperCamelCase )

        if "mask" in key:
            continue
        elif "qkv" in key:
            lowerCAmelCase_ : Dict = key.split('''.''' )
            lowerCAmelCase_ : Any = int(key_split[1] )
            lowerCAmelCase_ : Optional[int] = int(key_split[3] )
            lowerCAmelCase_ : Dict = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                lowerCAmelCase_ : Optional[Any] = val[:dim, :]
                lowerCAmelCase_ : Any = val[dim : dim * 2, :]
                lowerCAmelCase_ : List[Any] = val[-dim:, :]
            else:
                lowerCAmelCase_ : Dict = val[:dim]
                lowerCAmelCase_ : Union[str, Any] = val[
                    dim : dim * 2
                ]
                lowerCAmelCase_ : Dict = val[-dim:]
        else:
            lowerCAmelCase_ : Optional[Any] = val

    return orig_state_dict


def UpperCamelCase( __UpperCamelCase : int ,__UpperCamelCase : Dict ):
    lowerCAmelCase_ : Optional[Any] = timm.create_model(__UpperCamelCase ,pretrained=__UpperCamelCase )
    timm_model.eval()

    lowerCAmelCase_ : List[str] = get_swinva_config(__UpperCamelCase )
    lowerCAmelCase_ : Union[str, Any] = SwinvaForImageClassification(__UpperCamelCase )
    model.eval()

    lowerCAmelCase_ : str = convert_state_dict(timm_model.state_dict() ,__UpperCamelCase )
    model.load_state_dict(__UpperCamelCase )

    lowerCAmelCase_ : List[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    lowerCAmelCase_ : Tuple = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swinva_name.replace('''_''' ,'''-''' ) ) )
    lowerCAmelCase_ : Union[str, Any] = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw )
    lowerCAmelCase_ : Optional[Any] = image_processor(images=__UpperCamelCase ,return_tensors='''pt''' )

    lowerCAmelCase_ : List[str] = timm_model(inputs['''pixel_values'''] )
    lowerCAmelCase_ : Union[str, Any] = model(**__UpperCamelCase ).logits

    assert torch.allclose(__UpperCamelCase ,__UpperCamelCase ,atol=1e-3 )

    print(f"""Saving model {swinva_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(__UpperCamelCase )

    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(__UpperCamelCase )

    model.push_to_hub(
        repo_path_or_name=Path(__UpperCamelCase ,__UpperCamelCase ) ,organization='''nandwalritik''' ,commit_message='''Add model''' ,)


if __name__ == "__main__":
    A__ : Union[str, Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--swinv2_name''',
        default='''swinv2_tiny_patch4_window8_256''',
        type=str,
        help='''Name of the Swinv2 timm model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )

    A__ : Optional[Any] = parser.parse_args()
    convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
103
0
import os
import unittest
from tempfile import TemporaryDirectory

import torch
import torch.nn as nn

from accelerate.utils import (
    OffloadedWeightsLoader,
    extract_submodules_state_dict,
    load_offloaded_weight,
    offload_state_dict,
    offload_weight,
)


class __snake_case ( nn.Module ):
    def __init__( self ):
        '''simple docstring'''
        super().__init__()
        lowercase : Optional[int] = nn.Linear(3 ,4 )
        lowercase : Tuple = nn.BatchNormad(4 )
        lowercase : int = nn.Linear(4 ,5 )

    def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
        '''simple docstring'''
        return self.lineara(self.batchnorm(self.lineara(snake_case ) ) )


class __snake_case ( unittest.TestCase ):
    def _SCREAMING_SNAKE_CASE ( self ):
        '''simple docstring'''
        lowercase : Tuple = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(snake_case ,model.state_dict() )
            lowercase : Dict = os.path.join(snake_case ,"""index.json""" )
            self.assertTrue(os.path.isfile(snake_case ) )
            # TODO: add tests on what is inside the index

            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                lowercase : Union[str, Any] = os.path.join(snake_case ,f"{key}.dat" )
                self.assertTrue(os.path.isfile(snake_case ) )
                # TODO: add tests on the fact weights are properly loaded

    def _SCREAMING_SNAKE_CASE ( self ):
        '''simple docstring'''
        lowercase : Optional[Any] = [torch.floataa, torch.floataa, torch.bfloataa]
        for dtype in dtypes:
            lowercase : Any = torch.randn(2 ,3 ,dtype=snake_case )
            with TemporaryDirectory() as tmp_dir:
                lowercase : Optional[Any] = offload_weight(snake_case ,"""weight""" ,snake_case ,{} )
                lowercase : Dict = os.path.join(snake_case ,"""weight.dat""" )
                self.assertTrue(os.path.isfile(snake_case ) )
                self.assertDictEqual(snake_case ,{"""weight""": {"""shape""": [2, 3], """dtype""": str(snake_case ).split(""".""" )[1]}} )

                lowercase : int = load_offloaded_weight(snake_case ,index["""weight"""] )
                self.assertTrue(torch.equal(snake_case ,snake_case ) )

    def _SCREAMING_SNAKE_CASE ( self ):
        '''simple docstring'''
        lowercase : Union[str, Any] = ModelForTest()
        lowercase : Tuple = model.state_dict()
        lowercase : str = {k: v for k, v in state_dict.items() if """linear2""" not in k}
        lowercase : Dict = {k: v for k, v in state_dict.items() if """linear2""" in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(snake_case ,snake_case )
            lowercase : int = OffloadedWeightsLoader(state_dict=snake_case ,save_folder=snake_case )

            # Every key is there with the right value
            self.assertEqual(sorted(snake_case ) ,sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(snake_case ,weight_map[key] ) )

        lowercase : Optional[int] = {k: v for k, v in state_dict.items() if """weight""" in k}
        lowercase : Optional[int] = {k: v for k, v in state_dict.items() if """weight""" not in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(snake_case ,snake_case )
            lowercase : List[str] = OffloadedWeightsLoader(state_dict=snake_case ,save_folder=snake_case )

            # Every key is there with the right value
            self.assertEqual(sorted(snake_case ) ,sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(snake_case ,weight_map[key] ) )

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(snake_case ,snake_case )
            # Duplicates are removed
            lowercase : Optional[int] = OffloadedWeightsLoader(state_dict=snake_case ,save_folder=snake_case )

            # Every key is there with the right value
            self.assertEqual(sorted(snake_case ) ,sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(snake_case ,weight_map[key] ) )

    def _SCREAMING_SNAKE_CASE ( self ):
        '''simple docstring'''
        lowercase : str = {"""a.1""": 0, """a.10""": 1, """a.2""": 2}
        lowercase : Tuple = extract_submodules_state_dict(snake_case ,["""a.1""", """a.2"""] )
        self.assertDictEqual(snake_case ,{"""a.1""": 0, """a.2""": 2} )

        lowercase : Tuple = {"""a.1.a""": 0, """a.10.a""": 1, """a.2.a""": 2}
        lowercase : Tuple = extract_submodules_state_dict(snake_case ,["""a.1""", """a.2"""] )
        self.assertDictEqual(snake_case ,{"""a.1.a""": 0, """a.2.a""": 2} )
285
from ...processing_utils import ProcessorMixin


class __snake_case ( lowerCAmelCase ):
    _a : Union[str, Any]= "WhisperFeatureExtractor"
    _a : int= "WhisperTokenizer"

    def __init__( self ,snake_case ,snake_case ):
        '''simple docstring'''
        super().__init__(snake_case ,snake_case )
        lowercase : Optional[int] = self.feature_extractor
        lowercase : Tuple = False

    def _SCREAMING_SNAKE_CASE ( self ,snake_case=None ,snake_case=None ,snake_case=True ):
        '''simple docstring'''
        return self.tokenizer.get_decoder_prompt_ids(task=snake_case ,language=snake_case ,no_timestamps=snake_case )

    def __call__( self ,*snake_case ,**snake_case ):
        '''simple docstring'''
        if self._in_target_context_manager:
            return self.current_processor(*snake_case ,**snake_case )

        lowercase : Optional[Any] = kwargs.pop("""audio""" ,snake_case )
        lowercase : str = kwargs.pop("""sampling_rate""" ,snake_case )
        lowercase : Dict = kwargs.pop("""text""" ,snake_case )
        if len(snake_case ) > 0:
            lowercase : List[Any] = args[0]
            lowercase : Tuple = args[1:]

        if audio is None and text is None:
            raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )

        if audio is not None:
            lowercase : Any = self.feature_extractor(snake_case ,*snake_case ,sampling_rate=snake_case ,**snake_case )
        if text is not None:
            lowercase : str = self.tokenizer(snake_case ,**snake_case )

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            lowercase : List[Any] = encodings["""input_ids"""]
            return inputs

    def _SCREAMING_SNAKE_CASE ( self ,*snake_case ,**snake_case ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*snake_case ,**snake_case )

    def _SCREAMING_SNAKE_CASE ( self ,*snake_case ,**snake_case ):
        '''simple docstring'''
        return self.tokenizer.decode(*snake_case ,**snake_case )

    def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case="np" ):
        '''simple docstring'''
        return self.tokenizer.get_prompt_ids(snake_case ,return_tensors=snake_case )
285
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowercase_ = logging.get_logger(__name__)

lowercase_ = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class A ( _UpperCAmelCase ):
    """simple docstring"""

    lowerCamelCase = 'open-llama'

    def __init__( self : Any,lowercase_ : Optional[int]=1_0_0_0_0_0,lowercase_ : Union[str, Any]=4_0_9_6,lowercase_ : Dict=1_1_0_0_8,lowercase_ : Dict=3_2,lowercase_ : Optional[int]=3_2,lowercase_ : Dict="silu",lowercase_ : Union[str, Any]=2_0_4_8,lowercase_ : Optional[int]=0.02,lowercase_ : Dict=1E-6,lowercase_ : Dict=True,lowercase_ : List[Any]=0,lowercase_ : Optional[int]=1,lowercase_ : str=2,lowercase_ : str=False,lowercase_ : str=True,lowercase_ : int=0.1,lowercase_ : List[Any]=0.1,lowercase_ : List[Any]=True,lowercase_ : Union[str, Any]=True,lowercase_ : Any=None,**lowercase_ : List[Any],)-> Tuple:
        '''simple docstring'''
        A__ = vocab_size
        A__ = max_position_embeddings
        A__ = hidden_size
        A__ = intermediate_size
        A__ = num_hidden_layers
        A__ = num_attention_heads
        A__ = hidden_act
        A__ = initializer_range
        A__ = rms_norm_eps
        A__ = use_cache
        A__ = kwargs.pop(
            'use_memorry_efficient_attention',lowercase_ )
        A__ = hidden_dropout_prob
        A__ = attention_dropout_prob
        A__ = use_stable_embedding
        A__ = shared_input_output_embedding
        A__ = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=lowercase_,bos_token_id=lowercase_,eos_token_id=lowercase_,tie_word_embeddings=lowercase_,**lowercase_,)

    def snake_case__ ( self : str )-> str:
        '''simple docstring'''
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling,lowercase_ ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '
                F'got {self.rope_scaling}' )
        A__ = self.rope_scaling.get('type',lowercase_ )
        A__ = self.rope_scaling.get('factor',lowercase_ )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
        if rope_scaling_factor is None or not isinstance(lowercase_,lowercase_ ) or rope_scaling_factor <= 1.0:
            raise ValueError(F'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}' )
7
"""simple docstring""" import contextlib import importlib import io import unittest import transformers # Try to import everything from transformers to ensure every object can be loaded. from transformers import * # noqa F406 from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available if is_torch_available(): from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification if is_tf_available(): from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification if is_flax_available(): from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification A__ : Optional[Any] = DUMMY_UNKNOWN_IDENTIFIER # An actual model hosted on huggingface.co A__ : Tuple = 'main' # Default branch name A__ : Optional[Any] = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2' # One particular commit (not the top of `main`) A__ : Tuple = 'aaaaaaa' # This commit does not exist, so we should 404. A__ : Tuple = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684' # Sha-1 of config.json on the top of `main`, for checking purposes A__ : Any = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3' @contextlib.contextmanager def _snake_case ( ) -> str: print("Welcome!" ) yield print("Bye!" ) @contextlib.contextmanager def _snake_case ( ) -> List[str]: print("Bonjour!" ) yield print("Au revoir!" ) class lowercase__ ( unittest.TestCase ): def UpperCAmelCase__ ( self : Optional[int] ): # If the spec is missing, importlib would not be able to import the module dynamically. assert transformers.__spec__ is not None assert importlib.util.find_spec("transformers" ) is not None class lowercase__ ( unittest.TestCase ): @unittest.mock.patch("sys.stdout" , new_callable=io.StringIO ) def UpperCAmelCase__ ( self : Tuple , snake_case__ : int ): with ContextManagers([] ): print("Transformers are awesome!" ) # The print statement adds a new line at the end of the output self.assertEqual(mock_stdout.getvalue() , "Transformers are awesome!\n" ) @unittest.mock.patch("sys.stdout" , new_callable=io.StringIO ) def UpperCAmelCase__ ( self : Union[str, Any] , snake_case__ : Optional[int] ): with ContextManagers([context_en()] ): print("Transformers are awesome!" ) # The output should be wrapped with an English welcome and goodbye self.assertEqual(mock_stdout.getvalue() , "Welcome!\nTransformers are awesome!\nBye!\n" ) @unittest.mock.patch("sys.stdout" , new_callable=io.StringIO ) def UpperCAmelCase__ ( self : List[Any] , snake_case__ : List[str] ): with ContextManagers([context_fr(), context_en()] ): print("Transformers are awesome!" 
) # The output should be wrapped with an English and French welcome and goodbye self.assertEqual(mock_stdout.getvalue() , "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n" ) @require_torch def UpperCAmelCase__ ( self : int ): self.assertEqual(find_labels(snake_case__ ) , ["labels"] ) self.assertEqual(find_labels(snake_case__ ) , ["labels", "next_sentence_label"] ) self.assertEqual(find_labels(snake_case__ ) , ["start_positions", "end_positions"] ) class lowercase__ ( snake_case__ ): pass self.assertEqual(find_labels(snake_case__ ) , ["labels"] ) @require_tf def UpperCAmelCase__ ( self : int ): self.assertEqual(find_labels(snake_case__ ) , ["labels"] ) self.assertEqual(find_labels(snake_case__ ) , ["labels", "next_sentence_label"] ) self.assertEqual(find_labels(snake_case__ ) , ["start_positions", "end_positions"] ) class lowercase__ ( snake_case__ ): pass self.assertEqual(find_labels(snake_case__ ) , ["labels"] ) @require_flax def UpperCAmelCase__ ( self : Tuple ): # Flax models don't have labels self.assertEqual(find_labels(snake_case__ ) , [] ) self.assertEqual(find_labels(snake_case__ ) , [] ) self.assertEqual(find_labels(snake_case__ ) , [] ) class lowercase__ ( snake_case__ ): pass self.assertEqual(find_labels(snake_case__ ) , [] )
144
0
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase ( unittest.TestCase ):
    '''simple docstring'''

    @slow
    def lowerCAmelCase__ ( self : Tuple ):
        UpperCamelCase_: List[Any] = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )

        UpperCamelCase_: List[str] = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] , dtype=tf.intaa , )  # J'aime le camembert !"

        UpperCamelCase_: Union[str, Any] = model(snake_case_ )["""last_hidden_state"""]
        UpperCamelCase_: Union[str, Any] = tf.TensorShape((1, 10, 768) )
        self.assertEqual(output.shape , snake_case_ )
        # compare the actual values for a slice.
        UpperCamelCase_: Dict = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
223
from __future__ import annotations

import unittest

from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel


@require_tf
class _UpperCamelCase :
    '''simple docstring'''

    __UpperCamelCase : str = PegasusConfig
    __UpperCamelCase : str = {}
    __UpperCamelCase : Optional[Any] = """gelu"""

    def __init__( self : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : str=13 , snake_case_ : Dict=7 , snake_case_ : List[Any]=True , snake_case_ : Optional[int]=False , snake_case_ : Any=99 , snake_case_ : Optional[Any]=32 , snake_case_ : Dict=2 , snake_case_ : Any=4 , snake_case_ : Optional[Any]=37 , snake_case_ : Dict=0.1 , snake_case_ : Optional[int]=0.1 , snake_case_ : List[str]=40 , snake_case_ : Tuple=2 , snake_case_ : Optional[int]=1 , snake_case_ : str=0 , ):
        UpperCamelCase_: List[str] = parent
        UpperCamelCase_: Optional[Any] = batch_size
        UpperCamelCase_: Union[str, Any] = seq_length
        UpperCamelCase_: Tuple = is_training
        UpperCamelCase_: Tuple = use_labels
        UpperCamelCase_: Tuple = vocab_size
        UpperCamelCase_: Tuple = hidden_size
        UpperCamelCase_: Optional[Any] = num_hidden_layers
        UpperCamelCase_: List[Any] = num_attention_heads
        UpperCamelCase_: Optional[int] = intermediate_size
        UpperCamelCase_: Dict = hidden_dropout_prob
        UpperCamelCase_: str = attention_probs_dropout_prob
        UpperCamelCase_: Optional[int] = max_position_embeddings
        UpperCamelCase_: Union[str, Any] = eos_token_id
        UpperCamelCase_: Optional[int] = pad_token_id
        UpperCamelCase_: List[Any] = bos_token_id

    def lowerCAmelCase__ ( self : str ):
        UpperCamelCase_: List[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        UpperCamelCase_: int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        UpperCamelCase_: List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )

        UpperCamelCase_: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        UpperCamelCase_: Optional[int] = self.config_cls(
            vocab_size=self.vocab_size ,
            d_model=self.hidden_size ,
            encoder_layers=self.num_hidden_layers ,
            decoder_layers=self.num_hidden_layers ,
            encoder_attention_heads=self.num_attention_heads ,
            decoder_attention_heads=self.num_attention_heads ,
            encoder_ffn_dim=self.intermediate_size ,
            decoder_ffn_dim=self.intermediate_size ,
            dropout=self.hidden_dropout_prob ,
            attention_dropout=self.attention_probs_dropout_prob ,
            max_position_embeddings=self.max_position_embeddings ,
            eos_token_ids=[2] ,
            bos_token_id=self.bos_token_id ,
            pad_token_id=self.pad_token_id ,
            decoder_start_token_id=self.pad_token_id ,
            **self.config_updates ,
        )
        UpperCamelCase_: List[str] = prepare_pegasus_inputs_dict(snake_case_ , snake_case_ , snake_case_ )
        return config, inputs_dict

    def lowerCAmelCase__ ( self : Any , snake_case_ : List[str] , snake_case_ : Dict ):
        UpperCamelCase_: Any = TFPegasusModel(config=snake_case_ ).get_decoder()
        UpperCamelCase_: Any = inputs_dict["""input_ids"""]

        UpperCamelCase_: int = input_ids[:1, :]
        UpperCamelCase_: List[str] = inputs_dict["""attention_mask"""][:1, :]
        UpperCamelCase_: Tuple = inputs_dict["""head_mask"""]
        UpperCamelCase_: int = 1

        # first forward pass
        UpperCamelCase_: Dict = model(snake_case_ , attention_mask=snake_case_ , head_mask=snake_case_ , use_cache=snake_case_ )

        UpperCamelCase_, UpperCamelCase_: List[str] = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        UpperCamelCase_: Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
        UpperCamelCase_: Union[str, Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )

        # append to next input_ids and
        UpperCamelCase_: Union[str, Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
        UpperCamelCase_: Optional[int] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )

        UpperCamelCase_: List[Any] = model(snake_case_ , attention_mask=snake_case_ )[0]
        UpperCamelCase_: Dict = model(snake_case_ , attention_mask=snake_case_ , past_key_values=snake_case_ )[0]

        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )

        # select random slice
        UpperCamelCase_: str = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        UpperCamelCase_: str = output_from_no_past[:, -3:, random_slice_idx]
        UpperCamelCase_: int = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(snake_case_ , snake_case_ , rtol=1e-3 )


def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , ) -> Optional[int]:
    if attention_mask is None:
        UpperCamelCase_: Union[str, Any] = tf.cast(tf.math.not_equal(lowerCamelCase , config.pad_token_id ) , tf.inta )
    if decoder_attention_mask is None:
        UpperCamelCase_: str = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
            ] ,
            axis=-1 ,
        )
    if head_mask is None:
        UpperCamelCase_: Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        UpperCamelCase_: Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        UpperCamelCase_: str = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class _UpperCamelCase ( _A , _A , unittest.TestCase ):
    '''simple docstring'''

    __UpperCamelCase : Union[str, Any] = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    __UpperCamelCase : str = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    __UpperCamelCase : int = (
        {
            """conversational""": TFPegasusForConditionalGeneration,
            """feature-extraction""": TFPegasusModel,
            """summarization""": TFPegasusForConditionalGeneration,
            """text2text-generation""": TFPegasusForConditionalGeneration,
            """translation""": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    __UpperCamelCase : Optional[Any] = True
    __UpperCamelCase : Any = False
    __UpperCamelCase : Dict = False

    def lowerCAmelCase__ ( self : Dict ):
        UpperCamelCase_: Tuple = TFPegasusModelTester(self )
        UpperCamelCase_: List[Any] = ConfigTester(self , config_class=snake_case_ )

    def lowerCAmelCase__ ( self : Dict ):
        self.config_tester.run_common_tests()

    def lowerCAmelCase__ ( self : Optional[int] ):
        UpperCamelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*snake_case_ )


@require_sentencepiece
@require_tokenizers
@require_tf
class _UpperCamelCase ( unittest.TestCase ):
    '''simple docstring'''

    __UpperCamelCase : Union[str, Any] = [
        """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
        """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
    ]
    __UpperCamelCase : Optional[int] = [
        """California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"""
        """ reduce the risk of wildfires.""",
        """N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.""",
    ]  # differs slightly from pytorch, likely due to numerical differences in linear layers
    __UpperCamelCase : Union[str, Any] = """google/pegasus-xsum"""

    @cached_property
    def lowerCAmelCase__ ( self : Dict ):
        return AutoTokenizer.from_pretrained(self.model_name )

    @cached_property
    def lowerCAmelCase__ ( self : int ):
        UpperCamelCase_: List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    def lowerCAmelCase__ ( self : Union[str, Any] , **snake_case_ : Optional[int] ):
        UpperCamelCase_: str = self.translate_src_text(**snake_case_ )
        assert self.expected_text == generated_words

    def lowerCAmelCase__ ( self : Optional[Any] , **snake_case_ : int ):
        UpperCamelCase_: Tuple = self.tokenizer(self.src_text , **snake_case_ , padding=snake_case_ , return_tensors="""tf""" )
        UpperCamelCase_: Tuple = self.model.generate(
            model_inputs.input_ids ,
            attention_mask=model_inputs.attention_mask ,
            num_beams=2 ,
            use_cache=snake_case_ ,
        )
        UpperCamelCase_: Tuple = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=snake_case_ )
        return generated_words

    @slow
    def lowerCAmelCase__ ( self : Optional[Any] ):
        self._assert_generated_batch_equal_expected()
223
1