code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
"""simple docstring""" import inspect import math import tempfile import unittest import numpy as np from transformers import ViTMAEConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMAEForPreTraining, ViTMAEModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Optional[Any], lowerCamelCase : Optional[int], lowerCamelCase : Any=13, lowerCamelCase : Optional[Any]=30, lowerCamelCase : Any=2, lowerCamelCase : Optional[Any]=3, lowerCamelCase : Union[str, Any]=True, lowerCamelCase : Union[str, Any]=True, lowerCamelCase : Optional[Any]=32, lowerCamelCase : Optional[Any]=5, lowerCamelCase : List[Any]=4, lowerCamelCase : List[str]=37, lowerCamelCase : Optional[int]="gelu", lowerCamelCase : Optional[int]=0.1, lowerCamelCase : Dict=0.1, lowerCamelCase : List[str]=10, lowerCamelCase : int=0.02, lowerCamelCase : Optional[Any]=3, lowerCamelCase : Optional[int]=0.6, lowerCamelCase : Any=None, )-> Any: lowerCamelCase__ : List[Any] =parent lowerCamelCase__ : List[Any] =batch_size lowerCamelCase__ : Optional[int] =image_size lowerCamelCase__ : List[str] =patch_size lowerCamelCase__ : List[Any] =num_channels lowerCamelCase__ : int =is_training lowerCamelCase__ : Tuple =use_labels lowerCamelCase__ : Dict =hidden_size lowerCamelCase__ : int =num_hidden_layers lowerCamelCase__ : Dict =num_attention_heads lowerCamelCase__ : List[Any] =intermediate_size lowerCamelCase__ : List[Any] 
=hidden_act lowerCamelCase__ : List[str] =hidden_dropout_prob lowerCamelCase__ : List[Any] =attention_probs_dropout_prob lowerCamelCase__ : List[Any] =type_sequence_label_size lowerCamelCase__ : str =initializer_range lowerCamelCase__ : Union[str, Any] =mask_ratio lowerCamelCase__ : Union[str, Any] =scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) lowerCamelCase__ : str =(image_size // patch_size) ** 2 lowerCamelCase__ : Optional[int] =int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def snake_case ( self : Optional[int] )-> Union[str, Any]: lowerCamelCase__ : Optional[Any] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ : Dict =None if self.use_labels: lowerCamelCase__ : List[str] =ids_tensor([self.batch_size], self.type_sequence_label_size ) lowerCamelCase__ : Dict =self.get_config() return config, pixel_values, labels def snake_case ( self : Union[str, Any] )-> Tuple: return ViTMAEConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=lowerCamelCase, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, ) def snake_case ( self : str, lowerCamelCase : Optional[Any], lowerCamelCase : List[str], lowerCamelCase : Dict )-> List[Any]: lowerCamelCase__ : Any =ViTMAEModel(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() lowerCamelCase__ : Optional[int] =model(lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case ( self : Optional[int], lowerCamelCase : 
Union[str, Any], lowerCamelCase : Any, lowerCamelCase : Optional[int] )-> Union[str, Any]: lowerCamelCase__ : Any =ViTMAEForPreTraining(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() lowerCamelCase__ : Any =model(lowerCamelCase ) lowerCamelCase__ : Optional[Any] =(self.image_size // self.patch_size) ** 2 lowerCamelCase__ : int =self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images lowerCamelCase__ : Optional[Any] =1 lowerCamelCase__ : str =ViTMAEForPreTraining(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() lowerCamelCase__ : Any =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase__ : Optional[int] =model(lowerCamelCase ) lowerCamelCase__ : str =self.patch_size**2 self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) ) def snake_case ( self : Optional[int] )-> Optional[Any]: lowerCamelCase__ : List[str] =self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict =config_and_inputs lowerCamelCase__ : Optional[int] ={'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' _a = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else () _a = {'feature-extraction': ViTMAEModel} if is_torch_available() else {} _a = False _a = False _a = False _a = False def snake_case ( self : int )-> Optional[Any]: lowerCamelCase__ : int =ViTMAEModelTester(self ) lowerCamelCase__ : Union[str, Any] =ConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase, hidden_size=37 ) def snake_case ( self : List[Any] )-> List[str]: self.config_tester.run_common_tests() @unittest.skip(reason='''ViTMAE does not use inputs_embeds''' ) def snake_case ( self : Tuple )-> Optional[Any]: pass def snake_case 
( self : Tuple )-> List[str]: lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : int =model_class(lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings(), (nn.Module) ) lowerCamelCase__ : Union[str, Any] =model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase, nn.Linear ) ) def snake_case ( self : List[Any] )-> int: lowerCamelCase__ , lowerCamelCase__ : int =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : List[Any] =model_class(lowerCamelCase ) lowerCamelCase__ : List[Any] =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ : Union[str, Any] =[*signature.parameters.keys()] lowerCamelCase__ : Optional[Any] =['''pixel_values'''] self.assertListEqual(arg_names[:1], lowerCamelCase ) def snake_case ( self : List[str] )-> Optional[int]: lowerCamelCase__ : List[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase ) def snake_case ( self : Dict )-> Optional[Any]: lowerCamelCase__ : str =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowerCamelCase ) def snake_case ( self : Union[str, Any], lowerCamelCase : List[str], lowerCamelCase : List[Any], lowerCamelCase : Optional[int] )-> int: # make masks reproducible np.random.seed(2 ) lowerCamelCase__ : Optional[int] =int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 ) lowerCamelCase__ : Optional[Any] =np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowerCamelCase__ : List[Any] =torch.from_numpy(lowerCamelCase ) # Add `noise` argument. 
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument lowerCamelCase__ : Dict =pt_noise super().check_pt_tf_models(lowerCamelCase, lowerCamelCase, lowerCamelCase ) def snake_case ( self : Optional[int] )-> int: lowerCamelCase__ , lowerCamelCase__ : Any =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : int =model_class(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): lowerCamelCase__ : List[str] =model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) ) lowerCamelCase__ : Any =outputs[0].cpu().numpy() lowerCamelCase__ : Optional[Any] =0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCamelCase ) lowerCamelCase__ : Tuple =model_class.from_pretrained(lowerCamelCase ) model.to(lowerCamelCase ) # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): lowerCamelCase__ : str =model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) ) # Make sure we don't have nans lowerCamelCase__ : Tuple =after_outputs[0].cpu().numpy() lowerCamelCase__ : Optional[int] =0 lowerCamelCase__ : Optional[int] =np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCamelCase, 1E-5 ) @unittest.skip( reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' ) def snake_case ( self : Any )-> Union[str, Any]: pass @unittest.skip( reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' ) def snake_case ( self : Union[str, Any] )-> List[Any]: pass @unittest.skip( reason='''ViTMAE returns a random mask + ids_restore in each forward pass. 
See test_save_load to get deterministic results.''' ) def snake_case ( self : Optional[Any] )-> int: pass @unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' ) def snake_case ( self : Tuple )-> Union[str, Any]: pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def snake_case ( self : Union[str, Any] )-> Dict: pass @slow def snake_case ( self : Optional[Any] )-> Union[str, Any]: for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : Any =ViTMAEModel.from_pretrained(lowerCamelCase ) self.assertIsNotNone(lowerCamelCase ) def snake_case__ ( ): """simple docstring""" lowerCamelCase__ : Optional[Any] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @cached_property def snake_case ( self : Any )-> Union[str, Any]: return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None @slow def snake_case ( self : Dict )-> Tuple: # make random mask reproducible across the PT and TF model np.random.seed(2 ) lowerCamelCase__ : Dict =ViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ).to(lowerCamelCase ) lowerCamelCase__ : int =self.default_image_processor lowerCamelCase__ : Tuple =prepare_img() lowerCamelCase__ : str =image_processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) lowerCamelCase__ : str =ViTMAEConfig() lowerCamelCase__ : Dict =int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) lowerCamelCase__ : Dict =np.random.uniform(size=(1, num_patches) ) # forward pass with torch.no_grad(): lowerCamelCase__ : Union[str, Any] =model(**lowerCamelCase, 
noise=torch.from_numpy(lowerCamelCase ).to(device=lowerCamelCase ) ) # verify the logits lowerCamelCase__ : Optional[int] =torch.Size((1, 196, 768) ) self.assertEqual(outputs.logits.shape, lowerCamelCase ) lowerCamelCase__ : Optional[int] =torch.tensor( [[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(lowerCamelCase ), atol=1E-4 ) )
625
"""simple docstring""" def snake_case__ ( __lowerCamelCase : list , __lowerCamelCase : list , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int ): """simple docstring""" if index == number_of_items: return 0 lowerCamelCase__ : Optional[int] =0 lowerCamelCase__ : Union[str, Any] =0 lowerCamelCase__ : List[str] =knapsack(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , index + 1 ) if weights[index] <= max_weight: lowerCamelCase__ : Dict =values[index] + knapsack( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , max_weight - weights[index] , index + 1 ) return max(__lowerCamelCase , __lowerCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
625
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowercase : int = logging.get_logger(__name__) _lowercase : List[str] = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"} class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' _a = 'ctrl' _a = ['past_key_values'] _a = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self : Union[str, Any], lowerCamelCase : Any=24_6534, lowerCamelCase : Dict=256, lowerCamelCase : Tuple=1280, lowerCamelCase : List[Any]=8192, lowerCamelCase : List[str]=48, lowerCamelCase : Union[str, Any]=16, lowerCamelCase : int=0.1, lowerCamelCase : Dict=0.1, lowerCamelCase : Dict=1E-6, lowerCamelCase : str=0.02, lowerCamelCase : str=True, **lowerCamelCase : Optional[Any], )-> Dict: lowerCamelCase__ : Optional[Any] =vocab_size lowerCamelCase__ : Optional[int] =n_positions lowerCamelCase__ : List[Any] =n_embd lowerCamelCase__ : Union[str, Any] =n_layer lowerCamelCase__ : Dict =n_head lowerCamelCase__ : str =dff lowerCamelCase__ : List[str] =resid_pdrop lowerCamelCase__ : int =embd_pdrop lowerCamelCase__ : Any =layer_norm_epsilon lowerCamelCase__ : int =initializer_range lowerCamelCase__ : Any =use_cache super().__init__(**lowerCamelCase )
625
"""simple docstring""" _lowercase : Optional[Any] = { "Pillow": "Pillow<10.0.0", "accelerate": "accelerate>=0.20.3", "av": "av==9.2.0", "beautifulsoup4": "beautifulsoup4", "black": "black~=23.1", "codecarbon": "codecarbon==1.2.0", "cookiecutter": "cookiecutter==1.7.3", "dataclasses": "dataclasses", "datasets": "datasets!=2.5.0", "decord": "decord==0.6.0", "deepspeed": "deepspeed>=0.9.3", "diffusers": "diffusers", "dill": "dill<0.3.5", "evaluate": "evaluate>=0.2.0", "fairscale": "fairscale>0.3", "faiss-cpu": "faiss-cpu", "fastapi": "fastapi", "filelock": "filelock", "flax": "flax>=0.4.1,<=0.7.0", "ftfy": "ftfy", "fugashi": "fugashi>=1.0", "GitPython": "GitPython<3.1.19", "hf-doc-builder": "hf-doc-builder>=0.3.0", "huggingface-hub": "huggingface-hub>=0.14.1,<1.0", "importlib_metadata": "importlib_metadata", "ipadic": "ipadic>=1.0.0,<2.0", "isort": "isort>=5.5.4", "jax": "jax>=0.2.8,!=0.3.2,<=0.4.13", "jaxlib": "jaxlib>=0.1.65,<=0.4.13", "jieba": "jieba", "kenlm": "kenlm", "keras-nlp": "keras-nlp>=0.3.1", "librosa": "librosa", "nltk": "nltk", "natten": "natten>=0.14.6", "numpy": "numpy>=1.17", "onnxconverter-common": "onnxconverter-common", "onnxruntime-tools": "onnxruntime-tools>=1.4.2", "onnxruntime": "onnxruntime>=1.4.0", "opencv-python": "opencv-python", "optuna": "optuna", "optax": "optax>=0.0.8,<=0.1.4", "packaging": "packaging>=20.0", "parameterized": "parameterized", "phonemizer": "phonemizer", "protobuf": "protobuf", "psutil": "psutil", "pyyaml": "pyyaml>=5.1", "pydantic": "pydantic<2", "pytest": "pytest>=7.2.0", "pytest-timeout": "pytest-timeout", "pytest-xdist": "pytest-xdist", "python": "python>=3.8.0", "ray[tune]": "ray[tune]", "regex": "regex!=2019.12.17", "requests": "requests", "rhoknp": "rhoknp>=1.1.0,<1.3.1", "rjieba": "rjieba", "rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1", "ruff": "ruff>=0.0.241,<=0.0.259", "sacrebleu": "sacrebleu>=1.4.12,<2.0.0", "sacremoses": "sacremoses", "safetensors": "safetensors>=0.3.1", "sagemaker": 
"sagemaker>=2.31.0", "scikit-learn": "scikit-learn", "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92", "sigopt": "sigopt", "starlette": "starlette", "sudachipy": "sudachipy>=0.6.6", "sudachidict_core": "sudachidict_core>=20220729", "tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14", "tensorflow": "tensorflow>=2.6,<2.14", "tensorflow-text": "tensorflow-text<2.14", "tf2onnx": "tf2onnx", "timeout-decorator": "timeout-decorator", "timm": "timm", "tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14", "torch": "torch>=1.9,!=1.12.0", "torchaudio": "torchaudio", "torchvision": "torchvision", "pyctcdecode": "pyctcdecode>=0.4.0", "tqdm": "tqdm>=4.27", "unidic": "unidic>=1.0.2", "unidic_lite": "unidic_lite>=1.0.7", "urllib3": "urllib3<2.0.0", "uvicorn": "uvicorn", }
625
1
"""simple docstring""" import unittest from transformers import BertGenerationTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin _lowercase : Tuple = "▁" _lowercase : Optional[int] = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' _a = BertGenerationTokenizer _a = False _a = True def snake_case ( self : List[Any] )-> str: super().setUp() lowerCamelCase__ : List[str] =BertGenerationTokenizer(lowerCamelCase, keep_accents=lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case ( self : List[str] )-> str: lowerCamelCase__ : Optional[int] ='''<s>''' lowerCamelCase__ : Tuple =1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase ), lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase ), lowerCamelCase ) def snake_case ( self : int )-> List[str]: lowerCamelCase__ : str =list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0], '''<unk>''' ) self.assertEqual(vocab_keys[1], '''<s>''' ) self.assertEqual(vocab_keys[-1], '''<pad>''' ) self.assertEqual(len(lowerCamelCase ), 1002 ) def snake_case ( self : int )-> Tuple: self.assertEqual(self.get_tokenizer().vocab_size, 1000 ) def snake_case ( self : Any )-> Dict: lowerCamelCase__ : int =BertGenerationTokenizer(lowerCamelCase, keep_accents=lowerCamelCase ) lowerCamelCase__ : Any =tokenizer.tokenize('''This is a test''' ) self.assertListEqual(lowerCamelCase, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCamelCase ), [285, 46, 10, 170, 382], ) lowerCamelCase__ : str =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowerCamelCase, [ 
SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ], ) lowerCamelCase__ : List[str] =tokenizer.convert_tokens_to_ids(lowerCamelCase ) self.assertListEqual( lowerCamelCase, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], ) lowerCamelCase__ : Optional[Any] =tokenizer.convert_ids_to_tokens(lowerCamelCase ) self.assertListEqual( lowerCamelCase, [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ], ) @cached_property def snake_case ( self : int )-> List[str]: return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) @slow def snake_case ( self : Dict )-> Optional[Any]: lowerCamelCase__ : List[str] ='''Hello World!''' lowerCamelCase__ : Tuple =[1_8536, 2260, 101] self.assertListEqual(lowerCamelCase, self.big_tokenizer.encode(lowerCamelCase ) ) @slow def snake_case ( self : Any )-> List[Any]: lowerCamelCase__ : Tuple =( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) lowerCamelCase__ : str =[ 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 3_4324, 497, 391, 408, 1_1342, 1244, 385, 100, 938, 985, 456, 574, 362, 1_2597, 3200, 3129, 1172, ] self.assertListEqual(lowerCamelCase, self.big_tokenizer.encode(lowerCamelCase ) ) @require_torch @slow def snake_case ( self : int )-> List[Any]: import torch from transformers import BertGenerationConfig, BertGenerationEncoder # Build sequence lowerCamelCase__ : Union[str, Any] =list(self.big_tokenizer.get_vocab().keys() )[:10] lowerCamelCase__ : List[str] =''' '''.join(lowerCamelCase ) lowerCamelCase__ : Tuple =self.big_tokenizer.encode_plus(lowerCamelCase, return_tensors='''pt''', return_token_type_ids=lowerCamelCase ) lowerCamelCase__ : str =self.big_tokenizer.batch_encode_plus( [sequence + ''' ''' + sequence], return_tensors='''pt''', return_token_type_ids=lowerCamelCase ) lowerCamelCase__ : int =BertGenerationConfig() lowerCamelCase__ : List[str] =BertGenerationEncoder(lowerCamelCase ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**lowerCamelCase ) model(**lowerCamelCase ) @slow def snake_case ( self : Tuple )-> Optional[int]: # fmt: off lowerCamelCase__ : int ={'''input_ids''': [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 
2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCamelCase, model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''', revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''', )
625
"""simple docstring""" def snake_case__ ( __lowerCamelCase : list[int] ): """simple docstring""" if not numbers: return 0 if not isinstance(__lowerCamelCase , (list, tuple) ) or not all( isinstance(__lowerCamelCase , __lowerCamelCase ) for number in numbers ): raise ValueError('''numbers must be an iterable of integers''' ) lowerCamelCase__ : Any =numbers[0] for i in range(1 , len(__lowerCamelCase ) ): # update the maximum and minimum subarray products lowerCamelCase__ : Dict =numbers[i] if number < 0: lowerCamelCase__ , lowerCamelCase__ : List[Any] =min_till_now, max_till_now lowerCamelCase__ : Optional[int] =max(__lowerCamelCase , max_till_now * number ) lowerCamelCase__ : Dict =min(__lowerCamelCase , min_till_now * number ) # update the maximum product found till now lowerCamelCase__ : Tuple =max(__lowerCamelCase , __lowerCamelCase ) return max_prod
625
1
"""simple docstring""" _lowercase : Union[str, Any] = frozenset( [ "prompt", "height", "width", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds", "cross_attention_kwargs", ] ) _lowercase : Tuple = frozenset(["prompt", "negative_prompt"]) _lowercase : Optional[Any] = frozenset([]) _lowercase : Optional[Any] = frozenset(["image"]) _lowercase : Union[str, Any] = frozenset( [ "image", "height", "width", "guidance_scale", ] ) _lowercase : int = frozenset(["image"]) _lowercase : Any = frozenset( [ "prompt", "image", "height", "width", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds", ] ) _lowercase : List[str] = frozenset(["prompt", "image", "negative_prompt"]) _lowercase : str = frozenset( [ # Text guided image variation with an image mask "prompt", "image", "mask_image", "height", "width", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds", ] ) _lowercase : List[Any] = frozenset(["prompt", "image", "mask_image", "negative_prompt"]) _lowercase : Dict = frozenset( [ # image variation with an image mask "image", "mask_image", "height", "width", "guidance_scale", ] ) _lowercase : str = frozenset(["image", "mask_image"]) _lowercase : Tuple = frozenset( [ "example_image", "image", "mask_image", "height", "width", "guidance_scale", ] ) _lowercase : str = frozenset(["example_image", "image", "mask_image"]) _lowercase : Optional[Any] = frozenset(["class_labels"]) _lowercase : Any = frozenset(["class_labels"]) _lowercase : str = frozenset(["batch_size"]) _lowercase : Optional[int] = frozenset([]) _lowercase : Any = frozenset(["batch_size"]) _lowercase : Union[str, Any] = frozenset([]) _lowercase : str = frozenset( [ "prompt", "audio_length_in_s", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds", "cross_attention_kwargs", ] ) _lowercase : Optional[Any] = frozenset(["prompt", "negative_prompt"]) _lowercase : List[str] = frozenset(["input_tokens"]) _lowercase : str 
= frozenset(["input_tokens"])
625
"""simple docstring""" from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, ) @flax.struct.dataclass class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' _a = 42 _a = 42 class __SCREAMING_SNAKE_CASE ( nn.Module ): '''simple docstring''' _a = 42 _a = (1_6, 3_2, 9_6, 2_5_6) _a = jnp.floataa def snake_case ( self : Tuple )-> int: lowerCamelCase__ : Tuple =nn.Conv( self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, ) lowerCamelCase__ : Dict =[] for i in range(len(self.block_out_channels ) - 1 ): lowerCamelCase__ : Dict =self.block_out_channels[i] lowerCamelCase__ : Dict =self.block_out_channels[i + 1] lowerCamelCase__ : List[str] =nn.Conv( lowerCamelCase, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, ) blocks.append(lowerCamelCase ) lowerCamelCase__ : Optional[int] =nn.Conv( lowerCamelCase, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, ) blocks.append(lowerCamelCase ) lowerCamelCase__ : Any =blocks lowerCamelCase__ : Optional[int] =nn.Conv( self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) def __call__( self : Any, lowerCamelCase : int )-> List[str]: lowerCamelCase__ : Tuple =self.conv_in(lowerCamelCase ) lowerCamelCase__ : Dict =nn.silu(lowerCamelCase ) for block in self.blocks: lowerCamelCase__ : str =block(lowerCamelCase ) lowerCamelCase__ : List[str] =nn.silu(lowerCamelCase ) lowerCamelCase__ : Any 
=self.conv_out(lowerCamelCase ) return embedding @flax_register_to_config class __SCREAMING_SNAKE_CASE ( nn.Module , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' _a = 3_2 _a = 4 _a = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) _a = False _a = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0) _a = 2 _a = 8 _a = None _a = 1_2_8_0 _a = 0.0 _a = False _a = jnp.floataa _a = True _a = 0 _a = "rgb" _a = (1_6, 3_2, 9_6, 2_5_6) def snake_case ( self : str, lowerCamelCase : jax.random.KeyArray )-> FrozenDict: # init input tensors lowerCamelCase__ : int =(1, self.in_channels, self.sample_size, self.sample_size) lowerCamelCase__ : int =jnp.zeros(lowerCamelCase, dtype=jnp.floataa ) lowerCamelCase__ : Union[str, Any] =jnp.ones((1,), dtype=jnp.intaa ) lowerCamelCase__ : str =jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.floataa ) lowerCamelCase__ : Any =(1, 3, self.sample_size * 8, self.sample_size * 8) lowerCamelCase__ : Optional[Any] =jnp.zeros(lowerCamelCase, dtype=jnp.floataa ) lowerCamelCase__ , lowerCamelCase__ : List[Any] =jax.random.split(lowerCamelCase ) lowerCamelCase__ : Dict ={'''params''': params_rng, '''dropout''': dropout_rng} return self.init(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )["params"] def snake_case ( self : Any )-> Tuple: lowerCamelCase__ : Optional[int] =self.block_out_channels lowerCamelCase__ : Tuple =block_out_channels[0] * 4 # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. 
The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. lowerCamelCase__ : List[Any] =self.num_attention_heads or self.attention_head_dim # input lowerCamelCase__ : int =nn.Conv( block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, ) # time lowerCamelCase__ : str =FlaxTimesteps( block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift ) lowerCamelCase__ : Dict =FlaxTimestepEmbedding(lowerCamelCase, dtype=self.dtype ) lowerCamelCase__ : List[Any] =FlaxControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels, ) lowerCamelCase__ : Dict =self.only_cross_attention if isinstance(lowerCamelCase, lowerCamelCase ): lowerCamelCase__ : int =(only_cross_attention,) * len(self.down_block_types ) if isinstance(lowerCamelCase, lowerCamelCase ): lowerCamelCase__ : List[str] =(num_attention_heads,) * len(self.down_block_types ) # down lowerCamelCase__ : Union[str, Any] =[] lowerCamelCase__ : Dict =[] lowerCamelCase__ : List[Any] =block_out_channels[0] lowerCamelCase__ : List[Any] =nn.Conv( lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) controlnet_down_blocks.append(lowerCamelCase ) for i, down_block_type in enumerate(self.down_block_types ): lowerCamelCase__ : List[Any] =output_channel lowerCamelCase__ : str =block_out_channels[i] lowerCamelCase__ : Dict =i == len(lowerCamelCase ) - 1 if down_block_type == "CrossAttnDownBlock2D": lowerCamelCase__ : str =FlaxCrossAttnDownBlockaD( in_channels=lowerCamelCase, out_channels=lowerCamelCase, dropout=self.dropout, 
num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype, ) else: lowerCamelCase__ : List[Any] =FlaxDownBlockaD( in_channels=lowerCamelCase, out_channels=lowerCamelCase, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, ) down_blocks.append(lowerCamelCase ) for _ in range(self.layers_per_block ): lowerCamelCase__ : Any =nn.Conv( lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) controlnet_down_blocks.append(lowerCamelCase ) if not is_final_block: lowerCamelCase__ : Any =nn.Conv( lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) controlnet_down_blocks.append(lowerCamelCase ) lowerCamelCase__ : int =down_blocks lowerCamelCase__ : List[str] =controlnet_down_blocks # mid lowerCamelCase__ : Tuple =block_out_channels[-1] lowerCamelCase__ : List[Any] =FlaxUNetMidBlockaDCrossAttn( in_channels=lowerCamelCase, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype, ) lowerCamelCase__ : List[str] =nn.Conv( lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) def __call__( self : int, lowerCamelCase : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : str, lowerCamelCase : float = 1.0, lowerCamelCase : bool = True, lowerCamelCase : bool = False, )-> Union[FlaxControlNetOutput, Tuple]: lowerCamelCase__ : int =self.controlnet_conditioning_channel_order if channel_order == "bgr": lowerCamelCase__ : int 
=jnp.flip(lowerCamelCase, axis=1 ) # 1. time if not isinstance(lowerCamelCase, jnp.ndarray ): lowerCamelCase__ : Any =jnp.array([timesteps], dtype=jnp.intaa ) elif isinstance(lowerCamelCase, jnp.ndarray ) and len(timesteps.shape ) == 0: lowerCamelCase__ : List[str] =timesteps.astype(dtype=jnp.floataa ) lowerCamelCase__ : int =jnp.expand_dims(lowerCamelCase, 0 ) lowerCamelCase__ : Optional[Any] =self.time_proj(lowerCamelCase ) lowerCamelCase__ : Optional[Any] =self.time_embedding(lowerCamelCase ) # 2. pre-process lowerCamelCase__ : Optional[int] =jnp.transpose(lowerCamelCase, (0, 2, 3, 1) ) lowerCamelCase__ : Dict =self.conv_in(lowerCamelCase ) lowerCamelCase__ : List[str] =jnp.transpose(lowerCamelCase, (0, 2, 3, 1) ) lowerCamelCase__ : int =self.controlnet_cond_embedding(lowerCamelCase ) sample += controlnet_cond # 3. down lowerCamelCase__ : Union[str, Any] =(sample,) for down_block in self.down_blocks: if isinstance(lowerCamelCase, lowerCamelCase ): lowerCamelCase__ , lowerCamelCase__ : Dict =down_block(lowerCamelCase, lowerCamelCase, lowerCamelCase, deterministic=not train ) else: lowerCamelCase__ , lowerCamelCase__ : Tuple =down_block(lowerCamelCase, lowerCamelCase, deterministic=not train ) down_block_res_samples += res_samples # 4. mid lowerCamelCase__ : Optional[int] =self.mid_block(lowerCamelCase, lowerCamelCase, lowerCamelCase, deterministic=not train ) # 5. contronet blocks lowerCamelCase__ : Optional[Any] =() for down_block_res_sample, controlnet_block in zip(lowerCamelCase, self.controlnet_down_blocks ): lowerCamelCase__ : Union[str, Any] =controlnet_block(lowerCamelCase ) controlnet_down_block_res_samples += (down_block_res_sample,) lowerCamelCase__ : List[str] =controlnet_down_block_res_samples lowerCamelCase__ : List[str] =self.controlnet_mid_block(lowerCamelCase ) # 6. 
scaling lowerCamelCase__ : Union[str, Any] =[sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample *= conditioning_scale if not return_dict: return (down_block_res_samples, mid_block_res_sample) return FlaxControlNetOutput( down_block_res_samples=lowerCamelCase, mid_block_res_sample=lowerCamelCase )
625
1
"""simple docstring""" import argparse import json import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( VideoMAEConfig, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEImageProcessor, ) def snake_case__ ( __lowerCamelCase : Optional[Any] ): """simple docstring""" lowerCamelCase__ : str =VideoMAEConfig() set_architecture_configs(__lowerCamelCase , __lowerCamelCase ) if "finetuned" not in model_name: lowerCamelCase__ : int =False if "finetuned" in model_name: lowerCamelCase__ : str ='''huggingface/label-files''' if "kinetics" in model_name: lowerCamelCase__ : List[Any] =400 lowerCamelCase__ : Optional[int] ='''kinetics400-id2label.json''' elif "ssv2" in model_name: lowerCamelCase__ : Tuple =174 lowerCamelCase__ : Optional[Any] ='''something-something-v2-id2label.json''' else: raise ValueError('''Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.''' ) lowerCamelCase__ : Optional[int] =json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) ) lowerCamelCase__ : List[Any] ={int(__lowerCamelCase ): v for k, v in idalabel.items()} lowerCamelCase__ : Dict =idalabel lowerCamelCase__ : Any ={v: k for k, v in idalabel.items()} return config def snake_case__ ( __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any] ): """simple docstring""" if "small" in model_name: lowerCamelCase__ : Optional[Any] =384 lowerCamelCase__ : List[Any] =1536 lowerCamelCase__ : int =12 lowerCamelCase__ : Dict =16 lowerCamelCase__ : List[Any] =12 lowerCamelCase__ : Optional[Any] =3 lowerCamelCase__ : Union[str, Any] =192 lowerCamelCase__ : str =768 elif "large" in model_name: lowerCamelCase__ : Union[str, Any] =1024 lowerCamelCase__ : str =4096 lowerCamelCase__ : int =24 lowerCamelCase__ : Dict =16 lowerCamelCase__ : Union[str, Any] =12 lowerCamelCase__ : List[Any] =8 lowerCamelCase__ : int =512 lowerCamelCase__ : Optional[Any] 
=2048 elif "huge" in model_name: lowerCamelCase__ : Optional[int] =1280 lowerCamelCase__ : Optional[int] =5120 lowerCamelCase__ : List[Any] =32 lowerCamelCase__ : List[Any] =16 lowerCamelCase__ : Optional[Any] =12 lowerCamelCase__ : Dict =8 lowerCamelCase__ : List[Any] =640 lowerCamelCase__ : Any =2560 elif "base" not in model_name: raise ValueError('''Model name should include either "small", "base", "large", or "huge"''' ) def snake_case__ ( __lowerCamelCase : Any ): """simple docstring""" if "encoder." in name: lowerCamelCase__ : Optional[int] =name.replace('''encoder.''' , '''''' ) if "cls_token" in name: lowerCamelCase__ : List[Any] =name.replace('''cls_token''' , '''videomae.embeddings.cls_token''' ) if "decoder_pos_embed" in name: lowerCamelCase__ : Tuple =name.replace('''decoder_pos_embed''' , '''decoder.decoder_pos_embed''' ) if "pos_embed" in name and "decoder" not in name: lowerCamelCase__ : Any =name.replace('''pos_embed''' , '''videomae.embeddings.position_embeddings''' ) if "patch_embed.proj" in name: lowerCamelCase__ : Optional[Any] =name.replace('''patch_embed.proj''' , '''videomae.embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: lowerCamelCase__ : List[Any] =name.replace('''patch_embed.norm''' , '''videomae.embeddings.norm''' ) if "decoder.blocks" in name: lowerCamelCase__ : Tuple =name.replace('''decoder.blocks''' , '''decoder.decoder_layers''' ) if "blocks" in name: lowerCamelCase__ : Dict =name.replace('''blocks''' , '''videomae.encoder.layer''' ) if "attn.proj" in name: lowerCamelCase__ : Union[str, Any] =name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name and "bias" not in name: lowerCamelCase__ : List[str] =name.replace('''attn''' , '''attention.self''' ) if "attn" in name: lowerCamelCase__ : Union[str, Any] =name.replace('''attn''' , '''attention.attention''' ) if "norm1" in name: lowerCamelCase__ : Tuple =name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: 
lowerCamelCase__ : Optional[int] =name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: lowerCamelCase__ : List[Any] =name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: lowerCamelCase__ : int =name.replace('''mlp.fc2''' , '''output.dense''' ) if "decoder_embed" in name: lowerCamelCase__ : Any =name.replace('''decoder_embed''' , '''decoder.decoder_embed''' ) if "decoder_norm" in name: lowerCamelCase__ : Optional[Any] =name.replace('''decoder_norm''' , '''decoder.decoder_norm''' ) if "decoder_pred" in name: lowerCamelCase__ : Any =name.replace('''decoder_pred''' , '''decoder.decoder_pred''' ) if "norm.weight" in name and "decoder" not in name and "fc" not in name: lowerCamelCase__ : str =name.replace('''norm.weight''' , '''videomae.layernorm.weight''' ) if "norm.bias" in name and "decoder" not in name and "fc" not in name: lowerCamelCase__ : Optional[int] =name.replace('''norm.bias''' , '''videomae.layernorm.bias''' ) if "head" in name and "decoder" not in name: lowerCamelCase__ : List[str] =name.replace('''head''' , '''classifier''' ) return name def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : int ): """simple docstring""" for key in orig_state_dict.copy().keys(): lowerCamelCase__ : Dict =orig_state_dict.pop(__lowerCamelCase ) if key.startswith('''encoder.''' ): lowerCamelCase__ : Optional[int] =key.replace('''encoder.''' , '''''' ) if "qkv" in key: lowerCamelCase__ : Any =key.split('''.''' ) if key.startswith('''decoder.blocks''' ): lowerCamelCase__ : Tuple =config.decoder_hidden_size lowerCamelCase__ : str =int(key_split[2] ) lowerCamelCase__ : Any ='''decoder.decoder_layers.''' if "weight" in key: lowerCamelCase__ : List[Any] =val[:dim, :] lowerCamelCase__ : Any =val[dim : dim * 2, :] lowerCamelCase__ : Dict =val[-dim:, :] else: lowerCamelCase__ : Optional[Any] =config.hidden_size lowerCamelCase__ : Optional[Any] =int(key_split[1] ) lowerCamelCase__ : str ='''videomae.encoder.layer.''' if "weight" in 
key: lowerCamelCase__ : int =val[:dim, :] lowerCamelCase__ : Tuple =val[dim : dim * 2, :] lowerCamelCase__ : List[Any] =val[-dim:, :] else: lowerCamelCase__ : int =val return orig_state_dict def snake_case__ ( ): """simple docstring""" lowerCamelCase__ : List[Any] =hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' ) lowerCamelCase__ : Optional[Any] =np.load(__lowerCamelCase ) return list(__lowerCamelCase ) def snake_case__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] ): """simple docstring""" lowerCamelCase__ : str =get_videomae_config(__lowerCamelCase ) if "finetuned" in model_name: lowerCamelCase__ : Tuple =VideoMAEForVideoClassification(__lowerCamelCase ) else: lowerCamelCase__ : int =VideoMAEForPreTraining(__lowerCamelCase ) # download original checkpoint, hosted on Google Drive lowerCamelCase__ : Union[str, Any] ='''pytorch_model.bin''' gdown.cached_download(__lowerCamelCase , __lowerCamelCase , quiet=__lowerCamelCase ) lowerCamelCase__ : Optional[Any] =torch.load(__lowerCamelCase , map_location='''cpu''' ) if "model" in files: lowerCamelCase__ : Dict =files['''model'''] else: lowerCamelCase__ : str =files['''module'''] lowerCamelCase__ : Optional[Any] =convert_state_dict(__lowerCamelCase , __lowerCamelCase ) model.load_state_dict(__lowerCamelCase ) model.eval() # verify model on basic input lowerCamelCase__ : Dict =VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) lowerCamelCase__ : int =prepare_video() lowerCamelCase__ : Tuple =image_processor(__lowerCamelCase , return_tensors='''pt''' ) if "finetuned" not in model_name: lowerCamelCase__ : Tuple =hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' ) lowerCamelCase__ : Union[str, Any] =torch.load(__lowerCamelCase ) lowerCamelCase__ : int 
=model(**__lowerCamelCase ) lowerCamelCase__ : Dict =outputs.logits lowerCamelCase__ : List[str] =[ '''videomae-small-finetuned-kinetics''', '''videomae-small-finetuned-ssv2''', # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600) '''videomae-base-short''', '''videomae-base-short-finetuned-kinetics''', '''videomae-base''', '''videomae-base-finetuned-kinetics''', '''videomae-large''', '''videomae-large-finetuned-kinetics''', '''videomae-huge-finetuned-kinetics''', # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400) '''videomae-base-short-ssv2''', '''videomae-base-short-finetuned-ssv2''', '''videomae-base-ssv2''', '''videomae-base-finetuned-ssv2''', ] # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5] if model_name == "videomae-small-finetuned-kinetics": lowerCamelCase__ : Union[str, Any] =torch.Size([1, 400] ) lowerCamelCase__ : str =torch.tensor([-0.92_91, -0.40_61, -0.93_07] ) elif model_name == "videomae-small-finetuned-ssv2": lowerCamelCase__ : int =torch.Size([1, 174] ) lowerCamelCase__ : Dict =torch.tensor([0.26_71, -0.46_89, -0.82_35] ) elif model_name == "videomae-base": lowerCamelCase__ : List[str] =torch.Size([1, 1408, 1536] ) lowerCamelCase__ : Dict =torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]] ) elif model_name == "videomae-base-short": lowerCamelCase__ : List[Any] =torch.Size([1, 1408, 1536] ) lowerCamelCase__ : List[str] =torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] ) # we verified the loss both for normalized and unnormalized targets for this one lowerCamelCase__ : str =torch.tensor([0.51_42] ) if config.norm_pix_loss else torch.tensor([0.64_69] ) elif model_name == "videomae-large": lowerCamelCase__ : Union[str, Any] =torch.Size([1, 1408, 1536] ) lowerCamelCase__ : List[Any] =torch.tensor([[0.71_49, 0.79_97, 0.69_66], 
[0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]] ) elif model_name == "videomae-large-finetuned-kinetics": lowerCamelCase__ : Any =torch.Size([1, 400] ) lowerCamelCase__ : str =torch.tensor([0.07_71, 0.00_11, -0.36_25] ) elif model_name == "videomae-huge-finetuned-kinetics": lowerCamelCase__ : Any =torch.Size([1, 400] ) lowerCamelCase__ : Optional[int] =torch.tensor([0.24_33, 0.16_32, -0.48_94] ) elif model_name == "videomae-base-short-finetuned-kinetics": lowerCamelCase__ : List[str] =torch.Size([1, 400] ) lowerCamelCase__ : Dict =torch.tensor([0.65_88, 0.09_90, -0.24_93] ) elif model_name == "videomae-base-finetuned-kinetics": lowerCamelCase__ : str =torch.Size([1, 400] ) lowerCamelCase__ : Any =torch.tensor([0.36_69, -0.06_88, -0.24_21] ) elif model_name == "videomae-base-short-ssv2": lowerCamelCase__ : Tuple =torch.Size([1, 1408, 1536] ) lowerCamelCase__ : Dict =torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]] ) elif model_name == "videomae-base-short-finetuned-ssv2": lowerCamelCase__ : Optional[int] =torch.Size([1, 174] ) lowerCamelCase__ : Any =torch.tensor([-0.05_37, -0.15_39, -0.32_66] ) elif model_name == "videomae-base-ssv2": lowerCamelCase__ : Dict =torch.Size([1, 1408, 1536] ) lowerCamelCase__ : str =torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]] ) elif model_name == "videomae-base-finetuned-ssv2": lowerCamelCase__ : str =torch.Size([1, 174] ) lowerCamelCase__ : int =torch.tensor([0.19_61, -0.83_37, -0.63_89] ) else: raise ValueError(f'''Model name not supported. 
Should be one of {model_names}''' ) # verify logits assert logits.shape == expected_shape if "finetuned" in model_name: assert torch.allclose(logits[0, :3] , __lowerCamelCase , atol=1e-4 ) else: print('''Logits:''' , logits[0, :3, :3] ) assert torch.allclose(logits[0, :3, :3] , __lowerCamelCase , atol=1e-4 ) print('''Logits ok!''' ) # verify loss, if applicable if model_name == "videomae-base-short": lowerCamelCase__ : str =outputs.loss assert torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-4 ) print('''Loss ok!''' ) if pytorch_dump_folder_path is not None: print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__lowerCamelCase ) model.save_pretrained(__lowerCamelCase ) if push_to_hub: print('''Pushing to the hub...''' ) model.push_to_hub(__lowerCamelCase , organization='''nielsr''' ) if __name__ == "__main__": _lowercase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&amp;export=download&amp;confirm=t&amp;uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4", type=str, help=( "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct" " download link." ), ) parser.add_argument( "--pytorch_dump_folder_path", default="/Users/nielsrogge/Documents/VideoMAE/Test", type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) _lowercase : Union[str, Any] = parser.parse_args() convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
625
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) _lowercase : Optional[Any] = { "configuration_clip": [ "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "CLIPConfig", "CLIPOnnxConfig", "CLIPTextConfig", "CLIPVisionConfig", ], "processing_clip": ["CLIPProcessor"], "tokenization_clip": ["CLIPTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : str = ["CLIPTokenizerFast"] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Any = ["CLIPFeatureExtractor"] _lowercase : int = ["CLIPImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Optional[Any] = [ "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "CLIPModel", "CLIPPreTrainedModel", "CLIPTextModel", "CLIPTextModelWithProjection", "CLIPVisionModel", "CLIPVisionModelWithProjection", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Dict = [ "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "TFCLIPModel", "TFCLIPPreTrainedModel", "TFCLIPTextModel", "TFCLIPVisionModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Union[str, Any] = [ "FlaxCLIPModel", "FlaxCLIPPreTrainedModel", "FlaxCLIPTextModel", "FlaxCLIPTextPreTrainedModel", "FlaxCLIPVisionModel", "FlaxCLIPVisionPreTrainedModel", ] if TYPE_CHECKING: from .configuration_clip import ( CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPConfig, CLIPOnnxConfig, CLIPTextConfig, CLIPVisionConfig, ) from .processing_clip import CLIPProcessor from .tokenization_clip import CLIPTokenizer 
try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_clip_fast import CLIPTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clip import CLIPFeatureExtractor from .image_processing_clip import CLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clip import ( CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPModel, CLIPPreTrainedModel, CLIPTextModel, CLIPTextModelWithProjection, CLIPVisionModel, CLIPVisionModelWithProjection, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_clip import ( TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFCLIPModel, TFCLIPPreTrainedModel, TFCLIPTextModel, TFCLIPVisionModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_clip import ( FlaxCLIPModel, FlaxCLIPPreTrainedModel, FlaxCLIPTextModel, FlaxCLIPTextPreTrainedModel, FlaxCLIPVisionModel, FlaxCLIPVisionPreTrainedModel, ) else: import sys _lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
625
1
"""simple docstring""" from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( 
broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, 
has_transformer_engine_layers
625
"""simple docstring""" import os def snake_case__ ( ): """simple docstring""" with open(os.path.dirname(__lowerCamelCase ) + '''/p022_names.txt''' ) as file: lowerCamelCase__ : Tuple =str(file.readlines()[0] ) lowerCamelCase__ : int =names.replace('''"''' , '''''' ).split(''',''' ) names.sort() lowerCamelCase__ : Union[str, Any] =0 lowerCamelCase__ : str =0 for i, name in enumerate(__lowerCamelCase ): for letter in name: name_score += ord(__lowerCamelCase ) - 64 total_score += (i + 1) * name_score lowerCamelCase__ : Dict =0 return total_score if __name__ == "__main__": print(solution())
625
1
"""simple docstring""" import argparse import shutil from pathlib import Path from tqdm import tqdm from transformers import AutoTokenizer def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Dict=1024 ): """simple docstring""" lowerCamelCase__ , lowerCamelCase__ : Any =[], [] lowerCamelCase__ : str =list(zip(__lowerCamelCase , __lowerCamelCase ) ) lowerCamelCase__ , lowerCamelCase__ : List[Any] =sorted_examples[0] def is_too_big(__lowerCamelCase : int ): return tok(__lowerCamelCase , return_tensors='''pt''' ).input_ids.shape[1] > max_tokens for src, tgt in tqdm(sorted_examples[1:] ): lowerCamelCase__ : List[Any] =new_src + ''' ''' + src lowerCamelCase__ : List[Any] =new_tgt + ''' ''' + tgt if is_too_big(__lowerCamelCase ) or is_too_big(__lowerCamelCase ): # cant fit, finalize example finished_src.append(__lowerCamelCase ) finished_tgt.append(__lowerCamelCase ) lowerCamelCase__ , lowerCamelCase__ : Tuple =src, tgt else: # can fit, keep adding lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =cand_src, cand_tgt # cleanup if new_src: assert new_tgt finished_src.append(__lowerCamelCase ) finished_tgt.append(__lowerCamelCase ) return finished_src, finished_tgt def snake_case__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Path , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] ): """simple docstring""" lowerCamelCase__ : Dict =Path(__lowerCamelCase ) save_path.mkdir(exist_ok=__lowerCamelCase ) for split in ["train"]: lowerCamelCase__ , lowerCamelCase__ : str =data_dir / f'''{split}.source''', data_dir / f'''{split}.target''' lowerCamelCase__ : Optional[int] =[x.rstrip() for x in Path(__lowerCamelCase ).open().readlines()] lowerCamelCase__ : Optional[Any] =[x.rstrip() for x in Path(__lowerCamelCase ).open().readlines()] lowerCamelCase__ , lowerCamelCase__ : List[str] =pack_examples(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) 
print(f'''packed {split} split from {len(__lowerCamelCase )} examples -> {len(__lowerCamelCase )}.''' ) Path(save_path / f'''{split}.source''' ).open('''w''' ).write('''\n'''.join(__lowerCamelCase ) ) Path(save_path / f'''{split}.target''' ).open('''w''' ).write('''\n'''.join(__lowerCamelCase ) ) for split in ["val", "test"]: lowerCamelCase__ , lowerCamelCase__ : str =data_dir / f'''{split}.source''', data_dir / f'''{split}.target''' shutil.copyfile(__lowerCamelCase , save_path / f'''{split}.source''' ) shutil.copyfile(__lowerCamelCase , save_path / f'''{split}.target''' ) def snake_case__ ( ): """simple docstring""" lowerCamelCase__ : str =argparse.ArgumentParser() parser.add_argument('''--tok_name''' , type=__lowerCamelCase , help='''like facebook/bart-large-cnn,t5-base, etc.''' ) parser.add_argument('''--max_seq_len''' , type=__lowerCamelCase , default=128 ) parser.add_argument('''--data_dir''' , type=__lowerCamelCase ) parser.add_argument('''--save_path''' , type=__lowerCamelCase ) lowerCamelCase__ : Optional[int] =parser.parse_args() lowerCamelCase__ : int =AutoTokenizer.from_pretrained(args.tok_name ) return pack_data_dir(__lowerCamelCase , Path(args.data_dir ) , args.max_seq_len , args.save_path ) if __name__ == "__main__": packer_cli()
625
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : str, lowerCamelCase : int )-> None: lowerCamelCase__ : str =value lowerCamelCase__ : Node | None =None lowerCamelCase__ : Node | None =None class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : int, lowerCamelCase : Node )-> None: lowerCamelCase__ : Any =tree def snake_case ( self : str, lowerCamelCase : Node | None )-> int: if node is None: return 0 return node.value + ( self.depth_first_search(node.left ) + self.depth_first_search(node.right ) ) def __iter__( self : Dict )-> Iterator[int]: yield self.depth_first_search(self.tree ) if __name__ == "__main__": import doctest doctest.testmod()
625
1
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @slow def snake_case ( self : Optional[Any] )-> List[Any]: lowerCamelCase__ : List[Any] =TFAutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' ) lowerCamelCase__ : Tuple =AutoTokenizer.from_pretrained('''google/mt5-small''' ) lowerCamelCase__ : Optional[Any] =tokenizer('''Hello there''', return_tensors='''tf''' ).input_ids lowerCamelCase__ : str =tokenizer('''Hi I am''', return_tensors='''tf''' ).input_ids lowerCamelCase__ : int =model(lowerCamelCase, labels=lowerCamelCase ).loss lowerCamelCase__ : Union[str, Any] =-tf.math.reduce_mean(lowerCamelCase ).numpy() lowerCamelCase__ : Dict =-21.228_168 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
625
"""Prune GPT-2 attention heads by gradient-based importance scores.

Fixes over the original: the file imported the nonexistent ``GPTaLMHeadModel``,
bound the module logger to a throwaway name while every function read
``logger``, and bound dozens of intermediate results to placeholder names while
later statements read the intended names (``masked_entropy``, ``new_head_mask``,
``heads_to_prune``, ``args.device`` …), so nothing in the file could run.
Names are restored so writes and reads agree.
"""
import argparse
import logging
import os
from datetime import datetime

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm

from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)


def save_model(model, dirpath):
    """Save ``model`` into ``dirpath``, removing any stale config/weights first."""
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)


def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along the last axis.

    When ``unlogit`` is True, ``p`` is squared first (exponent 2), matching the
    original behavior for attention maps.
    """
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0  # 0 * log(0) -> 0, avoid NaNs
    return -plogp.sum(dim=-1)


def print_2d_tensor(tensor):
    """Log a 2D tensor row by row (floats with 5 decimals, ints as integers)."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))


def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Run the dataset through the model and gather per-head entropy and gradient importance.

    Returns ``(attn_entropy, head_importance, total_loss)``.
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)

    return attn_entropy, head_importance, total_loss


def mask_heads(args, model, eval_dataloader):
    """Iteratively zero out the least important heads while the score stays above the threshold.

    Returns the final binary head mask (also saved to ``head_mask.npy``).
    """
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask


def prune_heads(args, model, eval_dataloader, head_mask):
    """Actually remove the masked heads, then compare score and speed against masking."""
    # Pruning is like masking, but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )

    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)


def main():
    """Parse arguments, load GPT-2, compute head importance, and optionally mask/prune."""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
625
1
"""Lazy-import module for Mask2Former.

Fix over the original: the import structure dict and the conditional export
lists were bound to throwaway names (and the two lists overwrote each other),
while ``_LazyModule`` was called with an undefined ``_import_structure`` and
the resulting module was never installed into ``sys.modules``.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
625
"""Convert a T5X checkpoint into a Flax T5/LongT5 model.

Fixes over the original: (1) the attention-module-name selection used a bare
``if`` after the ``"t5"`` branch, so plain T5 configs fell through to the
``else`` and raised ValueError — the chain is now ``elif``; (2) several reads
referenced names that were never assigned (decoder MLP layer norm, decoder
norm, token embeddings, ``args.t5x_checkpoint_path``) and the Flax
parameter-tree writes had been collapsed into dead local bindings — the
assignments now target ``flax_model.params`` consistently.
"""
import argparse

from t5x import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    """Load a T5X checkpoint and copy its weights into a freshly-initialized Flax model.

    Args:
        t5x_checkpoint_path: Path to the T5X checkpoint directory.
        config_name: Model config name/path (t5 or longt5).
        flax_dump_folder_path: Output directory for the converted Flax model.

    Raises:
        ValueError: if the config is neither t5 nor a longt5 with a known
            encoder attention type.
    """
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)

    # v1.1 / LongT5 checkpoints split the MLP input projection into wi_0/wi_1.
    split_mlp_wi = "wi_0" in t5x_model["target"]["encoder"]["layers_0"]["mlp"]

    if config.model_type == "t5":
        encoder_attn_name = "SelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global]."
        )

    # Encoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            t5x_global_layer_norm = t5x_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        t5x_attention_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
        flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = t5x_attention_key
        flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = t5x_attention_out
        flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = t5x_attention_query
        flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = t5x_attention_value

        flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = t5x_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"][
                "weight"
            ] = t5x_global_layer_norm

        if split_mlp_wi:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block

    # Only for layer 0:
    t5x_encoder_rel_embedding = t5x_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"][
        "embedding"
    ] = t5x_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        t5x_encoder_global_rel_embedding = t5x_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["global_relative_attention_bias"][
            "embedding"
        ] = t5x_encoder_global_rel_embedding

    # Assigning
    t5x_encoder_norm = t5x_model["target"]["encoder"]["encoder_norm"]["scale"]
    flax_model.params["encoder"]["final_layer_norm"]["weight"] = t5x_encoder_norm

    # Decoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]

        # Layer Normalization
        t5x_pre_attention_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]

        # Encoder-Decoder-Attention
        t5x_enc_dec_attention_module = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        t5x_enc_dec_attention_key = t5x_enc_dec_attention_module["key"]["kernel"]
        t5x_enc_dec_attention_out = t5x_enc_dec_attention_module["out"]["kernel"]
        t5x_enc_dec_attention_query = t5x_enc_dec_attention_module["query"]["kernel"]
        t5x_enc_dec_attention_value = t5x_enc_dec_attention_module["value"]["kernel"]

        # Layer Normalization
        t5x_cross_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]

        # MLP
        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
        flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = t5x_attention_key
        flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = t5x_attention_out
        flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = t5x_attention_query
        flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = t5x_attention_value

        flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = t5x_pre_attention_layer_norm

        flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = t5x_enc_dec_attention_key
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = t5x_enc_dec_attention_out
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = t5x_enc_dec_attention_query
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = t5x_enc_dec_attention_value

        flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = t5x_cross_layer_norm

        if split_mlp_wi:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block

    # Decoder Normalization
    t5x_decoder_norm = t5x_model["target"]["decoder"]["decoder_norm"]["scale"]
    flax_model.params["decoder"]["final_layer_norm"]["weight"] = t5x_decoder_norm

    # Only for layer 0:
    t5x_decoder_rel_embedding = t5x_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][
        "embedding"
    ] = t5x_decoder_rel_embedding

    # Token Embeddings
    t5x_token_embeddings = t5x_model["target"]["token_embedder"]["embedding"]
    flax_model.params["shared"]["embedding"] = t5x_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in t5x_model["target"]["decoder"]:
        flax_model.params["lm_head"]["kernel"] = t5x_model["target"]["decoder"]["logits_dense"]["kernel"]

    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was sucessfully converted!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
    )
    parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
    parser.add_argument(
        "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
625
1
"""Tower of Hanoi solver that prints each disk move.

Fix over the original: all three functions were defined under the same
placeholder name while every call site referenced ``move_tower`` /
``move_disk`` / ``main``, so each call raised NameError.  The definitions now
carry the names the call sites use.
"""


def move_tower(height, from_pole, to_pole, with_pole):
    """Recursively move ``height`` disks from ``from_pole`` to ``to_pole``.

    ``with_pole`` is the spare peg.  Prints one line per disk move and moves
    nothing when ``height`` is 0 or negative (the recursion base case).
    """
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)  # clear the disks above
        move_disk(from_pole, to_pole)  # move the largest remaining disk
        move_tower(height - 1, with_pole, to_pole, from_pole)  # restack on top of it


def move_disk(fp, tp):
    """Print a single disk move from pole ``fp`` to pole ``tp``."""
    print("moving disk from", fp, "to", tp)


def main():
    """Read the tower height from stdin and solve the puzzle on poles A/B/C."""
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")


if __name__ == "__main__":
    main()
625
"""Tests for the PyTorch UperNet model (ConvNext backbone).

Fixes over the original: the tester class was defined under a placeholder
name while ``setUp`` instantiated ``UperNetModelTester`` (NameError), every
test method was named ``snake_case`` — so unittest never collected any of
them and later defs shadowed earlier ones — and several locals were bound to
placeholder names while later lines read the intended names.  Canonical names
are restored so the suite actually runs.
"""
import inspect
import unittest

from huggingface_hub import hf_hub_download

from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import UperNetForSemanticSegmentation
    from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class UperNetModelTester:
    """Builds a tiny ConvNext-backed UperNet config plus random inputs for the common tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = num_stages

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with random tensors sized from the tester fields."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        """Config for the tiny ConvNext backbone feeding UperNet."""
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        """Full UperNet config wrapping the backbone config."""
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        """Forward pass check: logits have shape (batch, num_labels, H, W)."""
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the common tester mixin: (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests specialized for UperNet (segmentation-only, no base model)."""

    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Intentionally a no-op: UperNetConfig nests its backbone config, so the
        # generic common-properties check does not apply.
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Download and return the ADE20k fixture image used by the integration tests."""
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image


@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    """Slow tests comparing pretrained-checkpoint logits against pinned slices."""

    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
625
1
"""simple docstring""" _lowercase : List[Any] = [ (1_0_0_0, "M"), (9_0_0, "CM"), (5_0_0, "D"), (4_0_0, "CD"), (1_0_0, "C"), (9_0, "XC"), (5_0, "L"), (4_0, "XL"), (1_0, "X"), (9, "IX"), (5, "V"), (4, "IV"), (1, "I"), ] def snake_case__ ( __lowerCamelCase : str ): """simple docstring""" lowerCamelCase__ : List[Any] ={'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1000} lowerCamelCase__ : Tuple =0 lowerCamelCase__ : str =0 while place < len(__lowerCamelCase ): if (place + 1 < len(__lowerCamelCase )) and (vals[roman[place]] < vals[roman[place + 1]]): total += vals[roman[place + 1]] - vals[roman[place]] place += 2 else: total += vals[roman[place]] place += 1 return total def snake_case__ ( __lowerCamelCase : int ): """simple docstring""" lowerCamelCase__ : Optional[Any] =[] for arabic, roman in ROMAN: ((lowerCamelCase__) , (lowerCamelCase__)) : List[str] =divmod(__lowerCamelCase , __lowerCamelCase ) result.append(roman * factor ) if number == 0: break return "".join(__lowerCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
625
"""simple docstring""" from ..utils import DummyObject, requires_backends class __SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase_ ): '''simple docstring''' _a = ['onnx'] def __init__( self : List[str], *lowerCamelCase : Union[str, Any], **lowerCamelCase : str )-> Optional[int]: requires_backends(self, ['''onnx'''] ) @classmethod def snake_case ( cls : List[str], *lowerCamelCase : Any, **lowerCamelCase : Union[str, Any] )-> Optional[int]: requires_backends(cls, ['''onnx'''] ) @classmethod def snake_case ( cls : Union[str, Any], *lowerCamelCase : Tuple, **lowerCamelCase : Tuple )-> Optional[int]: requires_backends(cls, ['''onnx'''] )
625
1
"""simple docstring""" import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def snake_case__ ( __lowerCamelCase : int ): """simple docstring""" return 1.0 / (1.0 + np.exp(-_outputs )) def snake_case__ ( __lowerCamelCase : List[str] ): """simple docstring""" lowerCamelCase__ : Optional[int] =np.max(_outputs , axis=-1 , keepdims=__lowerCamelCase ) lowerCamelCase__ : Any =np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=__lowerCamelCase ) class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' _a = 'sigmoid' _a = 'softmax' _a = 'none' @add_end_docstrings( lowerCAmelCase_ , r'\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `"default"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `"default"`: if the model has a single label, will apply the sigmoid function on the output. 
If the model\n has several labels, will apply the softmax function on the output.\n - `"sigmoid"`: Applies the sigmoid function on the output.\n - `"softmax"`: Applies the softmax function on the output.\n - `"none"`: Does not apply any function on the output.\n ' , ) class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' _a = False _a = ClassificationFunction.NONE def __init__( self : Optional[int], **lowerCamelCase : str )-> List[str]: super().__init__(**lowerCamelCase ) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == '''tf''' else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ) def snake_case ( self : Any, lowerCamelCase : Tuple=None, lowerCamelCase : Optional[Any]=None, lowerCamelCase : int="", **lowerCamelCase : Union[str, Any] )-> Any: # Using "" as default argument because we're going to use `top_k=None` in user code to declare # "No top_k" lowerCamelCase__ : Union[str, Any] =tokenizer_kwargs lowerCamelCase__ : Optional[int] ={} if hasattr(self.model.config, '''return_all_scores''' ) and return_all_scores is None: lowerCamelCase__ : Dict =self.model.config.return_all_scores if isinstance(lowerCamelCase, lowerCamelCase ) or top_k is None: lowerCamelCase__ : str =top_k lowerCamelCase__ : str =False elif return_all_scores is not None: warnings.warn( '''`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of''' ''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''', lowerCamelCase, ) if return_all_scores: lowerCamelCase__ : int =None else: lowerCamelCase__ : Optional[int] =1 if isinstance(lowerCamelCase, lowerCamelCase ): lowerCamelCase__ : Dict =ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: lowerCamelCase__ : List[str] =function_to_apply return preprocess_params, {}, postprocess_params def __call__( self : int, *lowerCamelCase : Dict, **lowerCamelCase : Tuple )-> Union[str, Any]: lowerCamelCase__ : 
str =super().__call__(*lowerCamelCase, **lowerCamelCase ) # TODO try and retrieve it in a nicer way from _sanitize_parameters. lowerCamelCase__ : Union[str, Any] ='''top_k''' not in kwargs if isinstance(args[0], lowerCamelCase ) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def snake_case ( self : Union[str, Any], lowerCamelCase : str, **lowerCamelCase : Dict )-> Dict[str, GenericTensor]: lowerCamelCase__ : Union[str, Any] =self.framework if isinstance(lowerCamelCase, lowerCamelCase ): return self.tokenizer(**lowerCamelCase, return_tensors=lowerCamelCase, **lowerCamelCase ) elif isinstance(lowerCamelCase, lowerCamelCase ) and len(lowerCamelCase ) == 1 and isinstance(inputs[0], lowerCamelCase ) and len(inputs[0] ) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0], text_pair=inputs[0][1], return_tensors=lowerCamelCase, **lowerCamelCase ) elif isinstance(lowerCamelCase, lowerCamelCase ): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( '''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a''' ''' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.''' ) return self.tokenizer(lowerCamelCase, return_tensors=lowerCamelCase, **lowerCamelCase ) def snake_case ( self : Optional[Any], lowerCamelCase : Tuple )-> int: return self.model(**lowerCamelCase ) def snake_case ( self : Optional[Any], lowerCamelCase : Optional[Any], lowerCamelCase : Optional[Any]=None, lowerCamelCase : Optional[Any]=1, lowerCamelCase : List[Any]=True )-> List[str]: # `_legacy` is used to determine if we're running the naked pipeline and in backward # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running # the more natural result containing the list. 
# Default value before `set_parameters` if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: lowerCamelCase__ : List[str] =ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: lowerCamelCase__ : Optional[int] =ClassificationFunction.SOFTMAX elif hasattr(self.model.config, '''function_to_apply''' ) and function_to_apply is None: lowerCamelCase__ : Tuple =self.model.config.function_to_apply else: lowerCamelCase__ : str =ClassificationFunction.NONE lowerCamelCase__ : int =model_outputs['''logits'''][0] lowerCamelCase__ : Tuple =outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: lowerCamelCase__ : List[str] =sigmoid(lowerCamelCase ) elif function_to_apply == ClassificationFunction.SOFTMAX: lowerCamelCase__ : int =softmax(lowerCamelCase ) elif function_to_apply == ClassificationFunction.NONE: lowerCamelCase__ : List[str] =outputs else: raise ValueError(F'''Unrecognized `function_to_apply` argument: {function_to_apply}''' ) if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} lowerCamelCase__ : Tuple =[ {'''label''': self.model.config.idalabel[i], '''score''': score.item()} for i, score in enumerate(lowerCamelCase ) ] if not _legacy: dict_scores.sort(key=lambda lowerCamelCase : x["score"], reverse=lowerCamelCase ) if top_k is not None: lowerCamelCase__ : Dict =dict_scores[:top_k] return dict_scores
625
"""simple docstring""" import colorsys from PIL import Image # type: ignore def snake_case__ ( __lowerCamelCase : float , __lowerCamelCase : float , __lowerCamelCase : int ): """simple docstring""" lowerCamelCase__ : Optional[Any] =x lowerCamelCase__ : Any =y for step in range(__lowerCamelCase ): # noqa: B007 lowerCamelCase__ : List[Any] =a * a - b * b + x lowerCamelCase__ : Optional[int] =2 * a * b + y lowerCamelCase__ : Union[str, Any] =a_new # divergence happens for all complex number with an absolute value # greater than 4 if a * a + b * b > 4: break return step / (max_step - 1) def snake_case__ ( __lowerCamelCase : float ): """simple docstring""" if distance == 1: return (0, 0, 0) else: return (255, 255, 255) def snake_case__ ( __lowerCamelCase : float ): """simple docstring""" if distance == 1: return (0, 0, 0) else: return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(__lowerCamelCase , 1 , 1 ) ) def snake_case__ ( __lowerCamelCase : int = 800 , __lowerCamelCase : int = 600 , __lowerCamelCase : float = -0.6 , __lowerCamelCase : float = 0 , __lowerCamelCase : float = 3.2 , __lowerCamelCase : int = 50 , __lowerCamelCase : bool = True , ): """simple docstring""" lowerCamelCase__ : Optional[Any] =Image.new('''RGB''' , (image_width, image_height) ) lowerCamelCase__ : Optional[int] =img.load() # loop through the image-coordinates for image_x in range(__lowerCamelCase ): for image_y in range(__lowerCamelCase ): # determine the figure-coordinates based on the image-coordinates lowerCamelCase__ : Optional[Any] =figure_width / image_width * image_height lowerCamelCase__ : Dict =figure_center_x + (image_x / image_width - 0.5) * figure_width lowerCamelCase__ : Optional[int] =figure_center_y + (image_y / image_height - 0.5) * figure_height lowerCamelCase__ : Any =get_distance(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # color the corresponding pixel based on the selected coloring-function if use_distance_color_coding: lowerCamelCase__ : int 
=get_color_coded_rgb(__lowerCamelCase ) else: lowerCamelCase__ : Optional[int] =get_black_and_white_rgb(__lowerCamelCase ) return img if __name__ == "__main__": import doctest doctest.testmod() # colored version, full figure _lowercase : Optional[Any] = get_image() # uncomment for colored version, different section, zoomed in # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4, # figure_width = 0.8) # uncomment for black and white version, full figure # img = get_image(use_distance_color_coding = False) # uncomment to save the image # img.save("mandelbrot.png") img.show()
625
1
"""simple docstring""" import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' def __init__( self : Dict, lowerCamelCase : Union[str, Any], lowerCamelCase : Dict, lowerCamelCase : str )-> Dict: lowerCamelCase__ : Tuple =dataset lowerCamelCase__ : Optional[Any] =process lowerCamelCase__ : Any =params def __len__( self : Any )-> Optional[Any]: return len(self.dataset ) def __getitem__( self : str, lowerCamelCase : List[str] )-> List[Any]: lowerCamelCase__ : str =self.dataset[i] lowerCamelCase__ : List[str] =self.process(lowerCamelCase, **self.params ) return processed class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' def __init__( self : List[Any], lowerCamelCase : Dict, lowerCamelCase : str, lowerCamelCase : Optional[Any], lowerCamelCase : Any=None )-> int: lowerCamelCase__ : str =loader lowerCamelCase__ : Union[str, Any] =infer lowerCamelCase__ : Optional[int] =params if loader_batch_size == 1: # Let's spare some time by deactivating altogether lowerCamelCase__ : int =None lowerCamelCase__ : Optional[Any] =loader_batch_size # Internal bookkeeping lowerCamelCase__ : Optional[Any] =None lowerCamelCase__ : str =None def __len__( self : Optional[int] )-> Tuple: return len(self.loader ) def __iter__( self : Any )-> Optional[Any]: lowerCamelCase__ : Optional[int] =iter(self.loader ) return self def snake_case ( self : List[str] )-> Optional[Any]: if isinstance(self._loader_batch_data, torch.Tensor ): # Batch data is simple tensor, just fetch the slice lowerCamelCase__ : Dict =self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) lowerCamelCase__ : Optional[int] ={} for k, element in self._loader_batch_data.items(): if isinstance(lowerCamelCase, lowerCamelCase ): # Convert ModelOutput to tuple first lowerCamelCase__ : Optional[int] =element.to_tuple() 
if isinstance(element[0], torch.Tensor ): lowerCamelCase__ : Any =tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0], np.ndarray ): lowerCamelCase__ : Union[str, Any] =tuple(np.expand_dims(el[self._loader_batch_index], 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(lowerCamelCase, lowerCamelCase ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0], torch.Tensor ): lowerCamelCase__ : List[str] =tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0], np.ndarray ): lowerCamelCase__ : Tuple =tuple(np.expand_dims(el[self._loader_batch_index], 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around lowerCamelCase__ : List[Any] =None elif isinstance(element[self._loader_batch_index], torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers lowerCamelCase__ : str =element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index], np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers lowerCamelCase__ : Union[str, Any] =np.expand_dims(element[self._loader_batch_index], 0 ) else: # This is typically a list, so no need to `unsqueeze`. 
lowerCamelCase__ : Optional[int] =element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 lowerCamelCase__ : Optional[int] =self._loader_batch_data.__class__(lowerCamelCase ) self._loader_batch_index += 1 return result def snake_case ( self : str )-> int: if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch lowerCamelCase__ : Union[str, Any] =next(self.iterator ) lowerCamelCase__ : List[str] =self.infer(lowerCamelCase, **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(lowerCamelCase, torch.Tensor ): lowerCamelCase__ : Union[str, Any] =processed else: lowerCamelCase__ : Union[str, Any] =list(processed.keys() )[0] lowerCamelCase__ : List[str] =processed[key] if isinstance(lowerCamelCase, lowerCamelCase ): lowerCamelCase__ : Dict =len(lowerCamelCase ) else: lowerCamelCase__ : Optional[Any] =first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. 
lowerCamelCase__ : List[Any] =observed_batch_size # Setting internal index to unwrap the batch lowerCamelCase__ : Any =processed lowerCamelCase__ : Any =0 return self.loader_batch_item() else: # We're not unrolling batches return processed class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' def __init__( self : Optional[int], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : str, lowerCamelCase : Optional[int]=None )-> List[str]: super().__init__(lowerCamelCase, lowerCamelCase, lowerCamelCase ) def __iter__( self : int )-> int: lowerCamelCase__ : List[Any] =iter(self.loader ) lowerCamelCase__ : Optional[int] =None return self def snake_case ( self : List[Any] )-> List[str]: if self.subiterator is None: lowerCamelCase__ : List[str] =self.infer(next(self.iterator ), **self.params ) try: # Try to return next item lowerCamelCase__ : str =next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. # # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators lowerCamelCase__ : int =self.infer(next(self.iterator ), **self.params ) lowerCamelCase__ : Union[str, Any] =next(self.subiterator ) return processed class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' def __iter__( self : int )-> Union[str, Any]: lowerCamelCase__ : int =iter(self.loader ) return self def snake_case ( self : List[Any] )-> List[str]: # Extremely similar to PipelineIterator in its unpacking mechanism # BUT, we have an extra required item which is the presence of `is_last` # That is because everything is flattened by `PipelineChunkIterator` we # need to keep track of how to regroup here in the original `process` # boundaries so that `process` and `postprocess` see the same data. 
# This iterator accumulates items (possibly while unbatching) until it # its a `is_last` and then just passes it on to the caller. lowerCamelCase__ : Dict =False lowerCamelCase__ : str =[] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: lowerCamelCase__ : Tuple =self.loader_batch_item() lowerCamelCase__ : List[Any] =item.pop('''is_last''' ) accumulator.append(lowerCamelCase ) if is_last: return accumulator while not is_last: lowerCamelCase__ : Optional[int] =self.infer(next(self.iterator ), **self.params ) if self.loader_batch_size is not None: if isinstance(lowerCamelCase, torch.Tensor ): lowerCamelCase__ : Optional[int] =processed else: lowerCamelCase__ : Any =list(processed.keys() )[0] lowerCamelCase__ : Union[str, Any] =processed[key] if isinstance(lowerCamelCase, lowerCamelCase ): lowerCamelCase__ : List[Any] =len(lowerCamelCase ) else: lowerCamelCase__ : Any =first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. 
lowerCamelCase__ : Optional[int] =observed_batch_size lowerCamelCase__ : Optional[int] =processed lowerCamelCase__ : Dict =0 while self._loader_batch_index < self.loader_batch_size: lowerCamelCase__ : Tuple =self.loader_batch_item() lowerCamelCase__ : str =item.pop('''is_last''' ) accumulator.append(lowerCamelCase ) if is_last: return accumulator else: lowerCamelCase__ : str =processed lowerCamelCase__ : Dict =item.pop('''is_last''' ) accumulator.append(lowerCamelCase ) return accumulator class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' def __init__( self : Optional[Any], lowerCamelCase : Dataset, lowerCamelCase : str )-> Union[str, Any]: lowerCamelCase__ : int =dataset lowerCamelCase__ : Optional[Any] =key def __len__( self : int )-> Optional[Any]: return len(self.dataset ) def __getitem__( self : Tuple, lowerCamelCase : List[str] )-> List[str]: return self.dataset[i][self.key] class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' def __init__( self : List[str], lowerCamelCase : Dataset, lowerCamelCase : str, lowerCamelCase : str )-> str: lowerCamelCase__ : str =dataset lowerCamelCase__ : Tuple =keya lowerCamelCase__ : Any =keya def __len__( self : Optional[int] )-> List[Any]: return len(self.dataset ) def __getitem__( self : Union[str, Any], lowerCamelCase : Tuple )-> Union[str, Any]: return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
625
"""simple docstring""" import argparse import json import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( VideoMAEConfig, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEImageProcessor, ) def snake_case__ ( __lowerCamelCase : Optional[Any] ): """simple docstring""" lowerCamelCase__ : str =VideoMAEConfig() set_architecture_configs(__lowerCamelCase , __lowerCamelCase ) if "finetuned" not in model_name: lowerCamelCase__ : int =False if "finetuned" in model_name: lowerCamelCase__ : str ='''huggingface/label-files''' if "kinetics" in model_name: lowerCamelCase__ : List[Any] =400 lowerCamelCase__ : Optional[int] ='''kinetics400-id2label.json''' elif "ssv2" in model_name: lowerCamelCase__ : Tuple =174 lowerCamelCase__ : Optional[Any] ='''something-something-v2-id2label.json''' else: raise ValueError('''Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.''' ) lowerCamelCase__ : Optional[int] =json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) ) lowerCamelCase__ : List[Any] ={int(__lowerCamelCase ): v for k, v in idalabel.items()} lowerCamelCase__ : Dict =idalabel lowerCamelCase__ : Any ={v: k for k, v in idalabel.items()} return config def snake_case__ ( __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any] ): """simple docstring""" if "small" in model_name: lowerCamelCase__ : Optional[Any] =384 lowerCamelCase__ : List[Any] =1536 lowerCamelCase__ : int =12 lowerCamelCase__ : Dict =16 lowerCamelCase__ : List[Any] =12 lowerCamelCase__ : Optional[Any] =3 lowerCamelCase__ : Union[str, Any] =192 lowerCamelCase__ : str =768 elif "large" in model_name: lowerCamelCase__ : Union[str, Any] =1024 lowerCamelCase__ : str =4096 lowerCamelCase__ : int =24 lowerCamelCase__ : Dict =16 lowerCamelCase__ : Union[str, Any] =12 lowerCamelCase__ : List[Any] =8 lowerCamelCase__ : int =512 lowerCamelCase__ : Optional[Any] 
=2048 elif "huge" in model_name: lowerCamelCase__ : Optional[int] =1280 lowerCamelCase__ : Optional[int] =5120 lowerCamelCase__ : List[Any] =32 lowerCamelCase__ : List[Any] =16 lowerCamelCase__ : Optional[Any] =12 lowerCamelCase__ : Dict =8 lowerCamelCase__ : List[Any] =640 lowerCamelCase__ : Any =2560 elif "base" not in model_name: raise ValueError('''Model name should include either "small", "base", "large", or "huge"''' ) def snake_case__ ( __lowerCamelCase : Any ): """simple docstring""" if "encoder." in name: lowerCamelCase__ : Optional[int] =name.replace('''encoder.''' , '''''' ) if "cls_token" in name: lowerCamelCase__ : List[Any] =name.replace('''cls_token''' , '''videomae.embeddings.cls_token''' ) if "decoder_pos_embed" in name: lowerCamelCase__ : Tuple =name.replace('''decoder_pos_embed''' , '''decoder.decoder_pos_embed''' ) if "pos_embed" in name and "decoder" not in name: lowerCamelCase__ : Any =name.replace('''pos_embed''' , '''videomae.embeddings.position_embeddings''' ) if "patch_embed.proj" in name: lowerCamelCase__ : Optional[Any] =name.replace('''patch_embed.proj''' , '''videomae.embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: lowerCamelCase__ : List[Any] =name.replace('''patch_embed.norm''' , '''videomae.embeddings.norm''' ) if "decoder.blocks" in name: lowerCamelCase__ : Tuple =name.replace('''decoder.blocks''' , '''decoder.decoder_layers''' ) if "blocks" in name: lowerCamelCase__ : Dict =name.replace('''blocks''' , '''videomae.encoder.layer''' ) if "attn.proj" in name: lowerCamelCase__ : Union[str, Any] =name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name and "bias" not in name: lowerCamelCase__ : List[str] =name.replace('''attn''' , '''attention.self''' ) if "attn" in name: lowerCamelCase__ : Union[str, Any] =name.replace('''attn''' , '''attention.attention''' ) if "norm1" in name: lowerCamelCase__ : Tuple =name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: 
lowerCamelCase__ : Optional[int] =name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: lowerCamelCase__ : List[Any] =name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: lowerCamelCase__ : int =name.replace('''mlp.fc2''' , '''output.dense''' ) if "decoder_embed" in name: lowerCamelCase__ : Any =name.replace('''decoder_embed''' , '''decoder.decoder_embed''' ) if "decoder_norm" in name: lowerCamelCase__ : Optional[Any] =name.replace('''decoder_norm''' , '''decoder.decoder_norm''' ) if "decoder_pred" in name: lowerCamelCase__ : Any =name.replace('''decoder_pred''' , '''decoder.decoder_pred''' ) if "norm.weight" in name and "decoder" not in name and "fc" not in name: lowerCamelCase__ : str =name.replace('''norm.weight''' , '''videomae.layernorm.weight''' ) if "norm.bias" in name and "decoder" not in name and "fc" not in name: lowerCamelCase__ : Optional[int] =name.replace('''norm.bias''' , '''videomae.layernorm.bias''' ) if "head" in name and "decoder" not in name: lowerCamelCase__ : List[str] =name.replace('''head''' , '''classifier''' ) return name def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : int ): """simple docstring""" for key in orig_state_dict.copy().keys(): lowerCamelCase__ : Dict =orig_state_dict.pop(__lowerCamelCase ) if key.startswith('''encoder.''' ): lowerCamelCase__ : Optional[int] =key.replace('''encoder.''' , '''''' ) if "qkv" in key: lowerCamelCase__ : Any =key.split('''.''' ) if key.startswith('''decoder.blocks''' ): lowerCamelCase__ : Tuple =config.decoder_hidden_size lowerCamelCase__ : str =int(key_split[2] ) lowerCamelCase__ : Any ='''decoder.decoder_layers.''' if "weight" in key: lowerCamelCase__ : List[Any] =val[:dim, :] lowerCamelCase__ : Any =val[dim : dim * 2, :] lowerCamelCase__ : Dict =val[-dim:, :] else: lowerCamelCase__ : Optional[Any] =config.hidden_size lowerCamelCase__ : Optional[Any] =int(key_split[1] ) lowerCamelCase__ : str ='''videomae.encoder.layer.''' if "weight" in 
key: lowerCamelCase__ : int =val[:dim, :] lowerCamelCase__ : Tuple =val[dim : dim * 2, :] lowerCamelCase__ : List[Any] =val[-dim:, :] else: lowerCamelCase__ : int =val return orig_state_dict def snake_case__ ( ): """simple docstring""" lowerCamelCase__ : List[Any] =hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' ) lowerCamelCase__ : Optional[Any] =np.load(__lowerCamelCase ) return list(__lowerCamelCase ) def snake_case__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] ): """simple docstring""" lowerCamelCase__ : str =get_videomae_config(__lowerCamelCase ) if "finetuned" in model_name: lowerCamelCase__ : Tuple =VideoMAEForVideoClassification(__lowerCamelCase ) else: lowerCamelCase__ : int =VideoMAEForPreTraining(__lowerCamelCase ) # download original checkpoint, hosted on Google Drive lowerCamelCase__ : Union[str, Any] ='''pytorch_model.bin''' gdown.cached_download(__lowerCamelCase , __lowerCamelCase , quiet=__lowerCamelCase ) lowerCamelCase__ : Optional[Any] =torch.load(__lowerCamelCase , map_location='''cpu''' ) if "model" in files: lowerCamelCase__ : Dict =files['''model'''] else: lowerCamelCase__ : str =files['''module'''] lowerCamelCase__ : Optional[Any] =convert_state_dict(__lowerCamelCase , __lowerCamelCase ) model.load_state_dict(__lowerCamelCase ) model.eval() # verify model on basic input lowerCamelCase__ : Dict =VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) lowerCamelCase__ : int =prepare_video() lowerCamelCase__ : Tuple =image_processor(__lowerCamelCase , return_tensors='''pt''' ) if "finetuned" not in model_name: lowerCamelCase__ : Tuple =hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' ) lowerCamelCase__ : Union[str, Any] =torch.load(__lowerCamelCase ) lowerCamelCase__ : int 
=model(**__lowerCamelCase ) lowerCamelCase__ : Dict =outputs.logits lowerCamelCase__ : List[str] =[ '''videomae-small-finetuned-kinetics''', '''videomae-small-finetuned-ssv2''', # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600) '''videomae-base-short''', '''videomae-base-short-finetuned-kinetics''', '''videomae-base''', '''videomae-base-finetuned-kinetics''', '''videomae-large''', '''videomae-large-finetuned-kinetics''', '''videomae-huge-finetuned-kinetics''', # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400) '''videomae-base-short-ssv2''', '''videomae-base-short-finetuned-ssv2''', '''videomae-base-ssv2''', '''videomae-base-finetuned-ssv2''', ] # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5] if model_name == "videomae-small-finetuned-kinetics": lowerCamelCase__ : Union[str, Any] =torch.Size([1, 400] ) lowerCamelCase__ : str =torch.tensor([-0.92_91, -0.40_61, -0.93_07] ) elif model_name == "videomae-small-finetuned-ssv2": lowerCamelCase__ : int =torch.Size([1, 174] ) lowerCamelCase__ : Dict =torch.tensor([0.26_71, -0.46_89, -0.82_35] ) elif model_name == "videomae-base": lowerCamelCase__ : List[str] =torch.Size([1, 1408, 1536] ) lowerCamelCase__ : Dict =torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]] ) elif model_name == "videomae-base-short": lowerCamelCase__ : List[Any] =torch.Size([1, 1408, 1536] ) lowerCamelCase__ : List[str] =torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] ) # we verified the loss both for normalized and unnormalized targets for this one lowerCamelCase__ : str =torch.tensor([0.51_42] ) if config.norm_pix_loss else torch.tensor([0.64_69] ) elif model_name == "videomae-large": lowerCamelCase__ : Union[str, Any] =torch.Size([1, 1408, 1536] ) lowerCamelCase__ : List[Any] =torch.tensor([[0.71_49, 0.79_97, 0.69_66], 
[0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]] ) elif model_name == "videomae-large-finetuned-kinetics": lowerCamelCase__ : Any =torch.Size([1, 400] ) lowerCamelCase__ : str =torch.tensor([0.07_71, 0.00_11, -0.36_25] ) elif model_name == "videomae-huge-finetuned-kinetics": lowerCamelCase__ : Any =torch.Size([1, 400] ) lowerCamelCase__ : Optional[int] =torch.tensor([0.24_33, 0.16_32, -0.48_94] ) elif model_name == "videomae-base-short-finetuned-kinetics": lowerCamelCase__ : List[str] =torch.Size([1, 400] ) lowerCamelCase__ : Dict =torch.tensor([0.65_88, 0.09_90, -0.24_93] ) elif model_name == "videomae-base-finetuned-kinetics": lowerCamelCase__ : str =torch.Size([1, 400] ) lowerCamelCase__ : Any =torch.tensor([0.36_69, -0.06_88, -0.24_21] ) elif model_name == "videomae-base-short-ssv2": lowerCamelCase__ : Tuple =torch.Size([1, 1408, 1536] ) lowerCamelCase__ : Dict =torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]] ) elif model_name == "videomae-base-short-finetuned-ssv2": lowerCamelCase__ : Optional[int] =torch.Size([1, 174] ) lowerCamelCase__ : Any =torch.tensor([-0.05_37, -0.15_39, -0.32_66] ) elif model_name == "videomae-base-ssv2": lowerCamelCase__ : Dict =torch.Size([1, 1408, 1536] ) lowerCamelCase__ : str =torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]] ) elif model_name == "videomae-base-finetuned-ssv2": lowerCamelCase__ : str =torch.Size([1, 174] ) lowerCamelCase__ : int =torch.tensor([0.19_61, -0.83_37, -0.63_89] ) else: raise ValueError(f'''Model name not supported. 
Should be one of {model_names}''' ) # verify logits assert logits.shape == expected_shape if "finetuned" in model_name: assert torch.allclose(logits[0, :3] , __lowerCamelCase , atol=1e-4 ) else: print('''Logits:''' , logits[0, :3, :3] ) assert torch.allclose(logits[0, :3, :3] , __lowerCamelCase , atol=1e-4 ) print('''Logits ok!''' ) # verify loss, if applicable if model_name == "videomae-base-short": lowerCamelCase__ : str =outputs.loss assert torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-4 ) print('''Loss ok!''' ) if pytorch_dump_folder_path is not None: print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__lowerCamelCase ) model.save_pretrained(__lowerCamelCase ) if push_to_hub: print('''Pushing to the hub...''' ) model.push_to_hub(__lowerCamelCase , organization='''nielsr''' ) if __name__ == "__main__": _lowercase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&amp;export=download&amp;confirm=t&amp;uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4", type=str, help=( "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct" " download link." ), ) parser.add_argument( "--pytorch_dump_folder_path", default="/Users/nielsrogge/Documents/VideoMAE/Test", type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) _lowercase : Union[str, Any] = parser.parse_args() convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
625
1
"""simple docstring""" import copy from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING _lowercase : str = logging.get_logger(__name__) _lowercase : Tuple = { "microsoft/conditional-detr-resnet-50": ( "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json" ), } class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' _a = 'conditional_detr' _a = ['past_key_values'] _a = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : str, lowerCamelCase : str=True, lowerCamelCase : List[Any]=None, lowerCamelCase : Optional[int]=3, lowerCamelCase : Union[str, Any]=300, lowerCamelCase : Optional[Any]=6, lowerCamelCase : Dict=2048, lowerCamelCase : int=8, lowerCamelCase : Optional[int]=6, lowerCamelCase : List[str]=2048, lowerCamelCase : Any=8, lowerCamelCase : Tuple=0.0, lowerCamelCase : str=0.0, lowerCamelCase : int=True, lowerCamelCase : Union[str, Any]="relu", lowerCamelCase : List[Any]=256, lowerCamelCase : List[Any]=0.1, lowerCamelCase : Any=0.0, lowerCamelCase : Optional[int]=0.0, lowerCamelCase : Optional[Any]=0.02, lowerCamelCase : str=1.0, lowerCamelCase : List[Any]=False, lowerCamelCase : List[Any]="sine", lowerCamelCase : Union[str, Any]="resnet50", lowerCamelCase : Optional[int]=True, lowerCamelCase : Optional[int]=False, lowerCamelCase : Union[str, Any]=2, lowerCamelCase : Optional[Any]=5, lowerCamelCase : Optional[Any]=2, lowerCamelCase : Optional[Any]=1, lowerCamelCase : Union[str, Any]=1, lowerCamelCase : List[str]=2, lowerCamelCase : Optional[Any]=5, lowerCamelCase : int=2, lowerCamelCase : Any=0.25, **lowerCamelCase : Optional[int], )-> Optional[Any]: if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and 
`use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) lowerCamelCase__ : Dict =CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(lowerCamelCase, lowerCamelCase ): lowerCamelCase__ : Tuple =backbone_config.get('''model_type''' ) lowerCamelCase__ : int =CONFIG_MAPPING[backbone_model_type] lowerCamelCase__ : Tuple =config_class.from_dict(lowerCamelCase ) lowerCamelCase__ : str =use_timm_backbone lowerCamelCase__ : Tuple =backbone_config lowerCamelCase__ : Tuple =num_channels lowerCamelCase__ : Dict =num_queries lowerCamelCase__ : Optional[Any] =d_model lowerCamelCase__ : Optional[Any] =encoder_ffn_dim lowerCamelCase__ : str =encoder_layers lowerCamelCase__ : List[Any] =encoder_attention_heads lowerCamelCase__ : Union[str, Any] =decoder_ffn_dim lowerCamelCase__ : Optional[Any] =decoder_layers lowerCamelCase__ : Optional[int] =decoder_attention_heads lowerCamelCase__ : Union[str, Any] =dropout lowerCamelCase__ : Optional[int] =attention_dropout lowerCamelCase__ : List[str] =activation_dropout lowerCamelCase__ : Any =activation_function lowerCamelCase__ : Any =init_std lowerCamelCase__ : str =init_xavier_std lowerCamelCase__ : Dict =encoder_layerdrop lowerCamelCase__ : Dict =decoder_layerdrop lowerCamelCase__ : Union[str, Any] =encoder_layers lowerCamelCase__ : Any =auxiliary_loss lowerCamelCase__ : Any =position_embedding_type lowerCamelCase__ : List[str] =backbone lowerCamelCase__ : Optional[int] =use_pretrained_backbone lowerCamelCase__ : Tuple =dilation # Hungarian matcher lowerCamelCase__ : Optional[int] =class_cost lowerCamelCase__ : Dict =bbox_cost lowerCamelCase__ : Union[str, Any] =giou_cost # Loss coefficients lowerCamelCase__ : Union[str, Any] =mask_loss_coefficient lowerCamelCase__ : List[Any] =dice_loss_coefficient lowerCamelCase__ : str =cls_loss_coefficient lowerCamelCase__ : Optional[int] 
=bbox_loss_coefficient lowerCamelCase__ : Union[str, Any] =giou_loss_coefficient lowerCamelCase__ : Dict =focal_alpha super().__init__(is_encoder_decoder=lowerCamelCase, **lowerCamelCase ) @property def snake_case ( self : Optional[int] )-> int: return self.encoder_attention_heads @property def snake_case ( self : str )-> int: return self.d_model def snake_case ( self : List[str] )-> Union[str, Any]: lowerCamelCase__ : List[Any] =copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: lowerCamelCase__ : Any =self.backbone_config.to_dict() lowerCamelCase__ : List[Any] =self.__class__.model_type return output class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' _a = version.parse('1.11' ) @property def snake_case ( self : int )-> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''pixel_mask''', {0: '''batch'''}), ] ) @property def snake_case ( self : str )-> float: return 1E-5 @property def snake_case ( self : Dict )-> int: return 12
625
"""simple docstring""" _lowercase : str = 0 # The first color of the flag. _lowercase : Dict = 1 # The second color of the flag. _lowercase : Tuple = 2 # The third color of the flag. _lowercase : Optional[int] = (red, white, blue) def snake_case__ ( __lowerCamelCase : list ): """simple docstring""" if not sequence: return [] if len(__lowerCamelCase ) == 1: return list(__lowerCamelCase ) lowerCamelCase__ : List[Any] =0 lowerCamelCase__ : Dict =len(__lowerCamelCase ) - 1 lowerCamelCase__ : Tuple =0 while mid <= high: if sequence[mid] == colors[0]: lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =sequence[mid], sequence[low] low += 1 mid += 1 elif sequence[mid] == colors[1]: mid += 1 elif sequence[mid] == colors[2]: lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =sequence[high], sequence[mid] high -= 1 else: lowerCamelCase__ : Dict =f'''The elements inside the sequence must contains only {colors} values''' raise ValueError(__lowerCamelCase ) return sequence if __name__ == "__main__": import doctest doctest.testmod() _lowercase : Optional[Any] = input("Enter numbers separated by commas:\n").strip() _lowercase : int = [int(item.strip()) for item in user_input.split(",")] print(f'{dutch_national_flag_sort(unsorted)}')
625
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowercase : str = { "configuration_distilbert": [ "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DistilBertConfig", "DistilBertOnnxConfig", ], "tokenization_distilbert": ["DistilBertTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Any = ["DistilBertTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : List[Any] = [ "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "DistilBertForMaskedLM", "DistilBertForMultipleChoice", "DistilBertForQuestionAnswering", "DistilBertForSequenceClassification", "DistilBertForTokenClassification", "DistilBertModel", "DistilBertPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Dict = [ "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFDistilBertForMaskedLM", "TFDistilBertForMultipleChoice", "TFDistilBertForQuestionAnswering", "TFDistilBertForSequenceClassification", "TFDistilBertForTokenClassification", "TFDistilBertMainLayer", "TFDistilBertModel", "TFDistilBertPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Any = [ "FlaxDistilBertForMaskedLM", "FlaxDistilBertForMultipleChoice", "FlaxDistilBertForQuestionAnswering", "FlaxDistilBertForSequenceClassification", "FlaxDistilBertForTokenClassification", "FlaxDistilBertModel", "FlaxDistilBertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_distilbert import ( DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertOnnxConfig, ) from .tokenization_distilbert import 
DistilBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_distilbert_fast import DistilBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel, ) else: import sys _lowercase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
625
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' _a = StableUnCLIPImgaImgPipeline _a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS _a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS _a = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess _a = frozenset([] ) def snake_case ( self : List[str] )-> str: lowerCamelCase__ : Dict =32 lowerCamelCase__ : Optional[Any] =embedder_hidden_size # image encoding components lowerCamelCase__ : Dict =CLIPImageProcessor(crop_size=32, size=32 ) torch.manual_seed(0 ) lowerCamelCase__ : List[Any] =CLIPVisionModelWithProjection( CLIPVisionConfig( hidden_size=lowerCamelCase, projection_dim=lowerCamelCase, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ) ) # regular denoising components torch.manual_seed(0 ) lowerCamelCase__ : Optional[int] 
=StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase ) lowerCamelCase__ : Dict =DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' ) torch.manual_seed(0 ) lowerCamelCase__ : Optional[int] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) torch.manual_seed(0 ) lowerCamelCase__ : Tuple =CLIPTextModel( CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=lowerCamelCase, projection_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) ) torch.manual_seed(0 ) lowerCamelCase__ : Dict =UNetaDConditionModel( sample_size=32, in_channels=4, out_channels=4, down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D'''), up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D'''), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type='''projection''', projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=lowerCamelCase, layers_per_block=1, upcast_attention=lowerCamelCase, use_linear_projection=lowerCamelCase, ) torch.manual_seed(0 ) lowerCamelCase__ : Union[str, Any] =DDIMScheduler( beta_schedule='''scaled_linear''', beta_start=0.00_085, beta_end=0.012, prediction_type='''v_prediction''', set_alpha_to_one=lowerCamelCase, steps_offset=1, ) torch.manual_seed(0 ) lowerCamelCase__ : Optional[int] =AutoencoderKL() lowerCamelCase__ : int ={ # image encoding components '''feature_extractor''': feature_extractor, '''image_encoder''': image_encoder.eval(), # image noising components '''image_normalizer''': image_normalizer.eval(), '''image_noising_scheduler''': image_noising_scheduler, # regular denoising components '''tokenizer''': tokenizer, '''text_encoder''': text_encoder.eval(), '''unet''': unet.eval(), '''scheduler''': scheduler, '''vae''': vae.eval(), } return components def snake_case ( self : str, lowerCamelCase : Dict, lowerCamelCase : Any=0, lowerCamelCase : str=True )-> List[str]: if str(lowerCamelCase 
).startswith('''mps''' ): lowerCamelCase__ : List[Any] =torch.manual_seed(lowerCamelCase ) else: lowerCamelCase__ : Any =torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase ) lowerCamelCase__ : Dict =floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase ) if pil_image: lowerCamelCase__ : int =input_image * 0.5 + 0.5 lowerCamelCase__ : Dict =input_image.clamp(0, 1 ) lowerCamelCase__ : List[str] =input_image.cpu().permute(0, 2, 3, 1 ).float().numpy() lowerCamelCase__ : Dict =DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0] return { "prompt": "An anime racoon running a marathon", "image": input_image, "generator": generator, "num_inference_steps": 2, "output_type": "np", } @skip_mps def snake_case ( self : List[str] )-> Optional[Any]: lowerCamelCase__ : Dict ='''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCamelCase__ : str =self.get_dummy_components() lowerCamelCase__ : int =StableUnCLIPImgaImgPipeline(**lowerCamelCase ) lowerCamelCase__ : Any =sd_pipe.to(lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase ) lowerCamelCase__ : Dict =self.get_dummy_inputs(lowerCamelCase ) inputs.update({'''image_embeds''': None} ) lowerCamelCase__ : Any =sd_pipe(**lowerCamelCase ).images lowerCamelCase__ : List[Any] =image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCamelCase__ : Union[str, Any] =np.array([0.3_872, 0.7_224, 0.5_601, 0.4_741, 0.6_872, 0.5_814, 0.4_636, 0.3_867, 0.5_078] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def snake_case ( self : int )-> Tuple: lowerCamelCase__ : Tuple =torch_device in ['''cpu''', '''mps'''] self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase ) def snake_case ( self : int )-> Optional[Any]: lowerCamelCase__ : List[Any] =torch_device in ['''cpu''', '''mps'''] self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase ) @unittest.skipIf( torch_device != '''cuda''' or 
not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', ) def snake_case ( self : List[str] )-> List[str]: self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase ) @slow @require_torch_gpu class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def snake_case ( self : List[Any] )-> Dict: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case ( self : Optional[int] )-> int: lowerCamelCase__ : Tuple =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' ) lowerCamelCase__ : Optional[int] =load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''' ) lowerCamelCase__ : Optional[Any] =StableUnCLIPImgaImgPipeline.from_pretrained( '''fusing/stable-unclip-2-1-l-img2img''', torch_dtype=torch.floataa ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowerCamelCase__ : int =torch.Generator(device='''cpu''' ).manual_seed(0 ) lowerCamelCase__ : Any =pipe(lowerCamelCase, '''anime turle''', generator=lowerCamelCase, output_type='''np''' ) lowerCamelCase__ : List[Any] =output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase ) def snake_case ( self : Optional[int] )-> Tuple: lowerCamelCase__ : Any =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' ) lowerCamelCase__ : str =load_numpy( 
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''' ) lowerCamelCase__ : Optional[int] =StableUnCLIPImgaImgPipeline.from_pretrained( '''fusing/stable-unclip-2-1-h-img2img''', torch_dtype=torch.floataa ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowerCamelCase__ : str =torch.Generator(device='''cpu''' ).manual_seed(0 ) lowerCamelCase__ : Tuple =pipe(lowerCamelCase, '''anime turle''', generator=lowerCamelCase, output_type='''np''' ) lowerCamelCase__ : Tuple =output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase ) def snake_case ( self : Optional[int] )-> List[str]: lowerCamelCase__ : int =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' ) torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowerCamelCase__ : Any =StableUnCLIPImgaImgPipeline.from_pretrained( '''fusing/stable-unclip-2-1-h-img2img''', torch_dtype=torch.floataa ) lowerCamelCase__ : Optional[Any] =pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowerCamelCase__ : List[Any] =pipe( lowerCamelCase, '''anime turtle''', num_inference_steps=2, output_type='''np''', ) lowerCamelCase__ : Optional[int] =torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
625
1
"""simple docstring""" import os import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers.models.realm.configuration_realm import RealmConfig from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' def snake_case ( self : Optional[int] )-> Dict: lowerCamelCase__ : List[str] =tempfile.mkdtemp() lowerCamelCase__ : List[str] =5 # Realm tok lowerCamelCase__ : Optional[int] =[ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''test''', '''question''', '''this''', '''is''', '''the''', '''first''', '''second''', '''third''', '''fourth''', '''fifth''', '''record''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] lowerCamelCase__ : Optional[int] =os.path.join(self.tmpdirname, '''realm_tokenizer''' ) os.makedirs(lowerCamelCase, exist_ok=lowerCamelCase ) lowerCamelCase__ : Dict =os.path.join(lowerCamelCase, VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) lowerCamelCase__ : Dict =os.path.join(self.tmpdirname, '''realm_block_records''' ) os.makedirs(lowerCamelCase, exist_ok=lowerCamelCase ) def snake_case ( self : str )-> RealmTokenizer: return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, '''realm_tokenizer''' ) ) def snake_case ( self : str )-> List[Any]: shutil.rmtree(self.tmpdirname ) def snake_case ( self : Optional[int] )-> Union[str, Any]: lowerCamelCase__ : List[str] =RealmConfig(num_block_records=self.num_block_records ) return config def snake_case ( self : Tuple )-> int: lowerCamelCase__ : Union[str, Any] =Dataset.from_dict( 
{ '''id''': ['''0''', '''1'''], '''question''': ['''foo''', '''bar'''], '''answers''': [['''Foo''', '''Bar'''], ['''Bar''']], } ) return dataset def snake_case ( self : Dict )-> Tuple: lowerCamelCase__ : Any =np.array( [ b'''This is the first record''', b'''This is the second record''', b'''This is the third record''', b'''This is the fourth record''', b'''This is the fifth record''', b'''This is a longer longer longer record''', ], dtype=lowerCamelCase, ) return block_records def snake_case ( self : Optional[Any] )-> Any: lowerCamelCase__ : List[str] =RealmRetriever( block_records=self.get_dummy_block_records(), tokenizer=self.get_tokenizer(), ) return retriever def snake_case ( self : List[str] )-> Optional[Any]: lowerCamelCase__ : Dict =self.get_config() lowerCamelCase__ : List[Any] =self.get_dummy_retriever() lowerCamelCase__ : Any =retriever.tokenizer lowerCamelCase__ : Union[str, Any] =np.array([0, 3], dtype='''long''' ) lowerCamelCase__ : Optional[int] =tokenizer(['''Test question'''] ).input_ids lowerCamelCase__ : List[str] =tokenizer( ['''the fourth'''], add_special_tokens=lowerCamelCase, return_token_type_ids=lowerCamelCase, return_attention_mask=lowerCamelCase, ).input_ids lowerCamelCase__ : Dict =config.reader_seq_len lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple =retriever( lowerCamelCase, lowerCamelCase, answer_ids=lowerCamelCase, max_length=lowerCamelCase, return_tensors='''np''' ) self.assertEqual(len(lowerCamelCase ), 2 ) self.assertEqual(len(lowerCamelCase ), 2 ) self.assertEqual(len(lowerCamelCase ), 2 ) self.assertEqual(concat_inputs.input_ids.shape, (2, 10) ) self.assertEqual(concat_inputs.attention_mask.shape, (2, 10) ) self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10) ) self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10) ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ), ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', 
'''the''', '''first''', '''record''', '''[SEP]'''], ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ), ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''], ) def snake_case ( self : Optional[Any] )-> List[Any]: lowerCamelCase__ : Any =self.get_config() lowerCamelCase__ : Dict =self.get_dummy_retriever() lowerCamelCase__ : Any =retriever.tokenizer lowerCamelCase__ : Optional[Any] =np.array([0, 3, 5], dtype='''long''' ) lowerCamelCase__ : int =tokenizer(['''Test question'''] ).input_ids lowerCamelCase__ : Optional[Any] =tokenizer( ['''the fourth''', '''longer longer'''], add_special_tokens=lowerCamelCase, return_token_type_ids=lowerCamelCase, return_attention_mask=lowerCamelCase, ).input_ids lowerCamelCase__ : Union[str, Any] =config.reader_seq_len lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] =retriever( lowerCamelCase, lowerCamelCase, answer_ids=lowerCamelCase, max_length=lowerCamelCase, return_tensors='''np''' ) self.assertEqual([False, True, True], lowerCamelCase ) self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], lowerCamelCase ) self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], lowerCamelCase ) def snake_case ( self : int )-> Tuple: lowerCamelCase__ : List[str] =self.get_dummy_retriever() retriever.save_pretrained(os.path.join(self.tmpdirname, '''realm_block_records''' ) ) # Test local path lowerCamelCase__ : Any =retriever.from_pretrained(os.path.join(self.tmpdirname, '''realm_block_records''' ) ) self.assertEqual(retriever.block_records[0], b'''This is the first record''' ) # Test mocked remote path with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download: lowerCamelCase__ : Optional[int] =os.path.join( os.path.join(self.tmpdirname, '''realm_block_records''' ), _REALM_BLOCK_RECORDS_FILENAME ) lowerCamelCase__ : Dict 
=RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' ) self.assertEqual(retriever.block_records[0], b'''This is the first record''' )
625
"""simple docstring""" def snake_case__ ( __lowerCamelCase : int = 4000000 ): """simple docstring""" lowerCamelCase__ : Dict =[] lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =0, 1 while b <= n: if b % 2 == 0: even_fibs.append(__lowerCamelCase ) lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =b, a + b return sum(__lowerCamelCase ) if __name__ == "__main__": print(f'{solution() = }')
625
1
"""Perceiver model configuration and ONNX export configuration.

NOTE(review): this chunk was machine-obfuscated — every ``self.attr = value``
had been rewritten as a rebinding of one throwaway local, all ``__init__``
parameters shared a single name (a SyntaxError), and both classes shared one
name.  Reconstructed below using the attribute/parameter names the assignment
order implies; defaults match ``deepmind/language-perceiver``.  TODO: confirm
against the upstream ``configuration_perceiver.py``.
"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging

logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}


class PerceiverConfig(PretrainedConfig):
    """Configuration class storing the hyper-parameters of a Perceiver model.

    Groups of arguments, in declaration order: core latent/transformer sizes,
    attention channel options, regular training hyper-parameters, then the
    task-specific attributes (masked LM, image classification, optical flow,
    multimodal autoencoding) that the trailing comments in the original file
    delimit.
    """

    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape


class PerceiverOnnxConfig(OnnxConfig):
    """ONNX export configuration for Perceiver."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Perceiver feeds both text and image data through a single "inputs"
        # tensor, hence the non-standard input name.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly
        # altered/simplified: the produced dict renames the usual
        # input_ids/pixel_values key to "inputs" (see `inputs` property).
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
625
"""simple docstring""" from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class __SCREAMING_SNAKE_CASE : '''simple docstring''' _a = BlenderbotSmallConfig _a = {} _a = 'gelu' def __init__( self : Union[str, Any], lowerCamelCase : List[str], lowerCamelCase : Dict=13, lowerCamelCase : Optional[Any]=7, lowerCamelCase : Optional[int]=True, lowerCamelCase : int=False, lowerCamelCase : Union[str, Any]=99, lowerCamelCase : str=32, lowerCamelCase : List[Any]=2, lowerCamelCase : Optional[int]=4, lowerCamelCase : Union[str, Any]=37, lowerCamelCase : str=0.1, lowerCamelCase : Optional[int]=0.1, lowerCamelCase : Optional[Any]=20, lowerCamelCase : int=2, lowerCamelCase : Any=1, lowerCamelCase : Optional[Any]=0, )-> List[str]: lowerCamelCase__ : Any =parent lowerCamelCase__ : Dict =batch_size lowerCamelCase__ : Optional[int] =seq_length lowerCamelCase__ : Tuple =is_training lowerCamelCase__ : Dict =use_labels lowerCamelCase__ : List[Any] =vocab_size lowerCamelCase__ : str =hidden_size lowerCamelCase__ : str =num_hidden_layers lowerCamelCase__ : Union[str, Any] =num_attention_heads lowerCamelCase__ : Any =intermediate_size lowerCamelCase__ : Dict =hidden_dropout_prob lowerCamelCase__ : List[Any] =attention_probs_dropout_prob lowerCamelCase__ : str =max_position_embeddings lowerCamelCase__ : Optional[int] =eos_token_id lowerCamelCase__ : str =pad_token_id lowerCamelCase__ : Union[str, Any] =bos_token_id def snake_case ( self 
: Any )-> Any: lowerCamelCase__ : Any =ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size ) lowerCamelCase__ : Tuple =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 ) lowerCamelCase__ : Any =tf.concat([input_ids, eos_tensor], axis=1 ) lowerCamelCase__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) lowerCamelCase__ : int =self.config_cls( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, ) lowerCamelCase__ : Optional[int] =prepare_blenderbot_small_inputs_dict(lowerCamelCase, lowerCamelCase, lowerCamelCase ) return config, inputs_dict def snake_case ( self : Any, lowerCamelCase : str, lowerCamelCase : Any )-> Optional[Any]: lowerCamelCase__ : Union[str, Any] =TFBlenderbotSmallModel(config=lowerCamelCase ).get_decoder() lowerCamelCase__ : List[Any] =inputs_dict['''input_ids'''] lowerCamelCase__ : Optional[int] =input_ids[:1, :] lowerCamelCase__ : str =inputs_dict['''attention_mask'''][:1, :] lowerCamelCase__ : Union[str, Any] =inputs_dict['''head_mask'''] lowerCamelCase__ : Optional[Any] =1 # first forward pass lowerCamelCase__ : Dict =model(lowerCamelCase, attention_mask=lowerCamelCase, head_mask=lowerCamelCase, use_cache=lowerCamelCase ) lowerCamelCase__ , lowerCamelCase__ : List[str] =outputs.to_tuple() # create hypothetical next token and extent to next_input_ids lowerCamelCase__ : Union[str, Any] =ids_tensor((self.batch_size, 3), config.vocab_size ) 
lowerCamelCase__ : Tuple =tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta ) # append to next input_ids and lowerCamelCase__ : List[str] =tf.concat([input_ids, next_tokens], axis=-1 ) lowerCamelCase__ : str =tf.concat([attention_mask, next_attn_mask], axis=-1 ) lowerCamelCase__ : Optional[int] =model(lowerCamelCase, attention_mask=lowerCamelCase )[0] lowerCamelCase__ : Union[str, Any] =model(lowerCamelCase, attention_mask=lowerCamelCase, past_key_values=lowerCamelCase )[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] ) # select random slice lowerCamelCase__ : Tuple =int(ids_tensor((1,), output_from_past.shape[-1] ) ) lowerCamelCase__ : int =output_from_no_past[:, -3:, random_slice_idx] lowerCamelCase__ : List[str] =output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowerCamelCase, lowerCamelCase, rtol=1E-3 ) def snake_case__ ( __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Tuple=None , __lowerCamelCase : Dict=None , __lowerCamelCase : Optional[int]=None , ): """simple docstring""" if attention_mask is None: lowerCamelCase__ : List[str] =tf.cast(tf.math.not_equal(__lowerCamelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: lowerCamelCase__ : str =tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: lowerCamelCase__ : int =tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowerCamelCase__ : int =tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: lowerCamelCase__ : List[str] =tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, 
"decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' _a = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) _a = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () _a = ( { 'conversational': TFBlenderbotSmallForConditionalGeneration, 'feature-extraction': TFBlenderbotSmallModel, 'summarization': TFBlenderbotSmallForConditionalGeneration, 'text2text-generation': TFBlenderbotSmallForConditionalGeneration, 'translation': TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) _a = True _a = False _a = False def snake_case ( self : Any )-> str: lowerCamelCase__ : Tuple =TFBlenderbotSmallModelTester(self ) lowerCamelCase__ : Union[str, Any] =ConfigTester(self, config_class=lowerCamelCase ) def snake_case ( self : Any )-> Optional[int]: self.config_tester.run_common_tests() def snake_case ( self : int )-> str: lowerCamelCase__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowerCamelCase ) @require_tokenizers @require_tf class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' _a = [ 'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like ' ' i\'m going to throw up.\nand why is that?' 
] _a = 'facebook/blenderbot_small-90M' @cached_property def snake_case ( self : Any )-> List[Any]: # use "old" tokenizer here because of bug when downloading new tokenizer return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) @cached_property def snake_case ( self : int )-> List[Any]: lowerCamelCase__ : str =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def snake_case ( self : Tuple )-> int: lowerCamelCase__ : Dict =self.tokenizer(self.src_text, return_tensors='''tf''' ) lowerCamelCase__ : Any =self.model.generate( model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=lowerCamelCase, ) lowerCamelCase__ : Any =self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=lowerCamelCase )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
625
1
"""Tests for the ConditionalDetr image processor.

NOTE(review): this chunk was machine-obfuscated — duplicate parameter names
(a SyntaxError) and attribute writes collapsed into one throwaway local.
Reconstructed with the names the code dereferences; literals (expected
tensors, paths, sizes) are preserved byte-for-byte from the obfuscated text.
Method names are inferred from upstream conventions — TODO confirm.
"""
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs

if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ConditionalDetrImageProcessor


class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) after shortest-edge resizing.

        For a batch, each image is resized individually and the max over each
        dimension is taken (padding to the largest image in the batch).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width


@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 3_9769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2_796, 0.3_138, 0.3_481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([3_9769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 3_9769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2_796, 0.3_138, 0.3_481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([3_9769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 82_2873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
625
"""Hamiltonian cycle search via backtracking.

Fixed defects: all three functions were defined under one placeholder name
(``snake_case__``) so later defs shadowed earlier ones while the bodies call
``valid_connection`` / ``util_hamilton_cycle`` — a guaranteed NameError; and
``hamilton_cycle`` assigned ``start_index`` to a throwaway local instead of
writing it into ``path[0]``/``path[-1]``, so the tour never started at the
requested vertex.
"""


def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    """Return True iff vertex *next_ver* may be placed at *curr_ind* in *path*.

    :param graph: adjacency matrix (1 = edge, 0 = no edge)
    :param next_ver: candidate vertex for position curr_ind
    :param curr_ind: index in path being filled
    :param path: partially built tour (unvisited slots hold -1)
    """
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    """Backtracking helper: fill path[curr_ind:] in place; return True on success."""
    # Base Case: every vertex placed — the cycle closes iff the last vertex
    # connects back to the starting one.
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step: try each vertex as the next stop on the tour.
    for next_ver in range(len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    """Return a Hamiltonian cycle as a vertex list starting and ending at
    *start_index*, or an empty list if the graph admits none.

    :param graph: adjacency matrix (1 = edge, 0 = no edge)
    :param start_index: vertex at which the cycle must start and end
    """
    # Path has len(graph) + 1 slots: the start vertex appears at both ends.
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate; if we find an answer return the path, otherwise an empty list
    return path if util_hamilton_cycle(graph, path, 1) else []
625
1
"""simple docstring""" import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) _lowercase : List[Any] = logging.getLogger() def snake_case__ ( ): """simple docstring""" lowerCamelCase__ : Union[str, Any] =argparse.ArgumentParser() parser.add_argument('''-f''' ) lowerCamelCase__ : Union[str, Any] =parser.parse_args() return args.f def snake_case__ ( __lowerCamelCase : List[Any] ): """simple docstring""" lowerCamelCase__ : List[str] ={} lowerCamelCase__ : Dict =os.path.join(__lowerCamelCase , '''all_results.json''' ) if os.path.exists(__lowerCamelCase ): with open(__lowerCamelCase , '''r''' ) as f: lowerCamelCase__ : Union[str, Any] =json.load(__lowerCamelCase ) else: raise ValueError(f'''can\'t find {path}''' ) return results def snake_case__ ( ): """simple docstring""" lowerCamelCase__ : str =torch.cuda.is_available() and torch_device == '''cuda''' return is_using_cuda and is_apex_available() _lowercase : Tuple = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' @classmethod def snake_case ( cls : Dict )-> Any: # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU lowerCamelCase__ : Dict =tempfile.mkdtemp() lowerCamelCase__ : int =os.path.join(cls.tmpdir, '''default_config.yml''' ) write_basic_config(save_location=cls.configPath ) lowerCamelCase__ : Tuple =['''accelerate''', '''launch''', '''--config_file''', cls.configPath] @classmethod def snake_case ( cls : Any )-> int: shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ, {'''WANDB_MODE''': '''offline'''} ) def snake_case ( self : Dict )-> Optional[int]: lowerCamelCase__ : List[Any] 
=self.get_auto_remove_tmp_dir() lowerCamelCase__ : str =F''' {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --seed=42 --checkpointing_steps epoch --with_tracking '''.split() if is_cuda_and_apex_available(): testargs.append('''--fp16''' ) run_command(self._launch_args + testargs ) lowerCamelCase__ : List[str] =get_results(lowerCamelCase ) self.assertGreaterEqual(result['''eval_accuracy'''], 0.75 ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase, '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase, '''glue_no_trainer''' ) ) ) @mock.patch.dict(os.environ, {'''WANDB_MODE''': '''offline'''} ) def snake_case ( self : str )-> Tuple: lowerCamelCase__ : Dict =self.get_auto_remove_tmp_dir() lowerCamelCase__ : int =F''' {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --block_size 128 --per_device_train_batch_size 5 --per_device_eval_batch_size 5 --num_train_epochs 2 --output_dir {tmp_dir} --checkpointing_steps epoch --with_tracking '''.split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. 
return run_command(self._launch_args + testargs ) lowerCamelCase__ : List[str] =get_results(lowerCamelCase ) self.assertLess(result['''perplexity'''], 100 ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase, '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase, '''clm_no_trainer''' ) ) ) @mock.patch.dict(os.environ, {'''WANDB_MODE''': '''offline'''} ) def snake_case ( self : Tuple )-> int: lowerCamelCase__ : Optional[int] =self.get_auto_remove_tmp_dir() lowerCamelCase__ : List[str] =F''' {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --num_train_epochs=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) lowerCamelCase__ : List[Any] =get_results(lowerCamelCase ) self.assertLess(result['''perplexity'''], 42 ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase, '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase, '''mlm_no_trainer''' ) ) ) @mock.patch.dict(os.environ, {'''WANDB_MODE''': '''offline'''} ) def snake_case ( self : Any )-> Any: # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu lowerCamelCase__ : Tuple =7 if get_gpu_count() > 1 else 2 lowerCamelCase__ : int =self.get_auto_remove_tmp_dir() lowerCamelCase__ : List[Any] =F''' {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) lowerCamelCase__ : 
Union[str, Any] =get_results(lowerCamelCase ) self.assertGreaterEqual(result['''eval_accuracy'''], 0.75 ) self.assertLess(result['''train_loss'''], 0.5 ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase, '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase, '''ner_no_trainer''' ) ) ) @unittest.skip(reason='''Fix me @muellerzr''' ) @mock.patch.dict(os.environ, {'''WANDB_MODE''': '''offline'''} ) def snake_case ( self : Union[str, Any] )-> Union[str, Any]: lowerCamelCase__ : Any =self.get_auto_remove_tmp_dir() lowerCamelCase__ : Any =F''' {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --seed=42 --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) lowerCamelCase__ : int =get_results(lowerCamelCase ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. 
self.assertGreaterEqual(result['''eval_f1'''], 28 ) self.assertGreaterEqual(result['''eval_exact'''], 28 ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase, '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase, '''qa_no_trainer''' ) ) ) @mock.patch.dict(os.environ, {'''WANDB_MODE''': '''offline'''} ) def snake_case ( self : List[str] )-> int: lowerCamelCase__ : Optional[int] =self.get_auto_remove_tmp_dir() lowerCamelCase__ : Tuple =F''' {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/swag/sample.json --validation_file tests/fixtures/tests_samples/swag/sample.json --output_dir {tmp_dir} --max_train_steps=20 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --with_tracking '''.split() run_command(self._launch_args + testargs ) lowerCamelCase__ : Any =get_results(lowerCamelCase ) self.assertGreaterEqual(result['''eval_accuracy'''], 0.8 ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase, '''swag_no_trainer''' ) ) ) @slow @mock.patch.dict(os.environ, {'''WANDB_MODE''': '''offline'''} ) def snake_case ( self : Tuple )-> List[Any]: lowerCamelCase__ : Dict =self.get_auto_remove_tmp_dir() lowerCamelCase__ : str =F''' {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) lowerCamelCase__ : str =get_results(lowerCamelCase ) self.assertGreaterEqual(result['''eval_rouge1'''], 10 ) self.assertGreaterEqual(result['''eval_rouge2'''], 2 ) 
self.assertGreaterEqual(result['''eval_rougeL'''], 7 ) self.assertGreaterEqual(result['''eval_rougeLsum'''], 7 ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase, '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase, '''summarization_no_trainer''' ) ) ) @slow @mock.patch.dict(os.environ, {'''WANDB_MODE''': '''offline'''} ) def snake_case ( self : int )-> Optional[Any]: lowerCamelCase__ : Optional[int] =self.get_auto_remove_tmp_dir() lowerCamelCase__ : Optional[Any] =F''' {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py --model_name_or_path sshleifer/student_marian_en_ro_6_1 --source_lang en --target_lang ro --train_file tests/fixtures/tests_samples/wmt16/sample.json --validation_file tests/fixtures/tests_samples/wmt16/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --num_beams=6 --learning_rate=3e-3 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --source_lang en_XX --target_lang ro_RO --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) lowerCamelCase__ : Dict =get_results(lowerCamelCase ) self.assertGreaterEqual(result['''eval_bleu'''], 30 ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase, '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase, '''translation_no_trainer''' ) ) ) @slow def snake_case ( self : Optional[int] )-> Dict: lowerCamelCase__ : Optional[Any] =logging.StreamHandler(sys.stdout ) logger.addHandler(lowerCamelCase ) lowerCamelCase__ : str =self.get_auto_remove_tmp_dir() lowerCamelCase__ : List[str] =F''' {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py --dataset_name huggingface/semantic-segmentation-test-sample --output_dir {tmp_dir} --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch '''.split() run_command(self._launch_args 
+ testargs ) lowerCamelCase__ : str =get_results(lowerCamelCase ) self.assertGreaterEqual(result['''eval_overall_accuracy'''], 0.10 ) @mock.patch.dict(os.environ, {'''WANDB_MODE''': '''offline'''} ) def snake_case ( self : int )-> int: lowerCamelCase__ : Dict =self.get_auto_remove_tmp_dir() lowerCamelCase__ : Optional[Any] =F''' {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py --model_name_or_path google/vit-base-patch16-224-in21k --dataset_name hf-internal-testing/cats_vs_dogs_sample --learning_rate 1e-4 --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --max_train_steps 2 --train_val_split 0.1 --seed 42 --output_dir {tmp_dir} --with_tracking --checkpointing_steps 1 '''.split() if is_cuda_and_apex_available(): testargs.append('''--fp16''' ) run_command(self._launch_args + testargs ) lowerCamelCase__ : Tuple =get_results(lowerCamelCase ) # The base model scores a 25% self.assertGreaterEqual(result['''eval_accuracy'''], 0.6 ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase, '''step_1''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase, '''image_classification_no_trainer''' ) ) )
625
"""Tests for the MBart tokenizer (slow and fast variants).

NOTE(review): this file appears to have passed through an identifier-obfuscation
pass — many locals are assigned to ``lowerCamelCase__`` while later statements
read names such as ``tokenizer``, ``ids`` or ``batch``; module constants below
are bound to ``_lowercase`` while the code reads ``EN_CODE``/``RO_CODE``.
The original bindings should be restored before running; only comments and
docstrings are changed here.
"""
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
)

from ...test_tokenization_common import TokenizerTesterMixin

# Path to the shared SentencePiece test fixture.
_lowercase : List[str] = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

# Language-code token ids (presumably EN_CODE / RO_CODE — obfuscated, see module note).
_lowercase : List[str] = 2_5_0_0_0_4
_lowercase : Optional[Any] = 2_5_0_0_2_0


@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
    """Common-suite tokenization tests for MBart, driven by TokenizerTesterMixin."""

    _a = MBartTokenizer
    _a = MBartTokenizerFast
    _a = True
    _a = True

    def snake_case ( self : Tuple )-> Union[str, Any]:
        """Build a SentencePiece-backed MBart tokenizer fixture and save it to the tmp dir."""
        super().setUp()

        # We have a SentencePiece fixture for testing
        lowerCamelCase__ : Union[str, Any] =MBartTokenizer(lowerCamelCase, keep_accents=lowerCamelCase )
        tokenizer.save_pretrained(self.tmpdirname )

    def snake_case ( self : Dict )-> Union[str, Any]:
        """Check tokenize / convert_tokens_to_ids / convert_ids_to_tokens round-trips."""
        lowerCamelCase__ : Any =MBartTokenizer(lowerCamelCase, keep_accents=lowerCamelCase )

        lowerCamelCase__ : List[Any] =tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(lowerCamelCase, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(lowerCamelCase ),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        lowerCamelCase__ : str =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            lowerCamelCase,
            [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''é''',
                '''.''',
            ],
        )
        lowerCamelCase__ : Union[str, Any] =tokenizer.convert_tokens_to_ids(lowerCamelCase )
        self.assertListEqual(
            lowerCamelCase,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
            ],
        )

        lowerCamelCase__ : str =tokenizer.convert_ids_to_tokens(lowerCamelCase )
        self.assertListEqual(
            lowerCamelCase,
            [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''<unk>''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''<unk>''',
                '''.''',
            ],
        )

    def snake_case ( self : Tuple )-> List[Any]:
        """Check that slow and fast tokenizers save/load equivalently (legacy and new formats)."""
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        lowerCamelCase__ : int =(self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowerCamelCase__ : Union[str, Any] =self.rust_tokenizer_class.from_pretrained(lowerCamelCase, **lowerCamelCase )
                lowerCamelCase__ : str =self.tokenizer_class.from_pretrained(lowerCamelCase, **lowerCamelCase )

                lowerCamelCase__ : List[str] =tempfile.mkdtemp()

                lowerCamelCase__ : Union[str, Any] =tokenizer_r.save_pretrained(lowerCamelCase )
                lowerCamelCase__ : Optional[int] =tokenizer_p.save_pretrained(lowerCamelCase )

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                lowerCamelCase__ : List[str] =tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
                self.assertSequenceEqual(lowerCamelCase, lowerCamelCase )

                # Checks everything loads correctly in the same way
                lowerCamelCase__ : Any =tokenizer_r.from_pretrained(lowerCamelCase )
                lowerCamelCase__ : Dict =tokenizer_p.from_pretrained(lowerCamelCase )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(lowerCamelCase )

                # Save tokenizer rust, legacy_format=True
                lowerCamelCase__ : Dict =tempfile.mkdtemp()

                lowerCamelCase__ : List[str] =tokenizer_r.save_pretrained(lowerCamelCase, legacy_format=lowerCamelCase )
                lowerCamelCase__ : Tuple =tokenizer_p.save_pretrained(lowerCamelCase )

                # Checks it save with the same files
                self.assertSequenceEqual(lowerCamelCase, lowerCamelCase )

                # Checks everything loads correctly in the same way
                lowerCamelCase__ : Optional[int] =tokenizer_r.from_pretrained(lowerCamelCase )
                lowerCamelCase__ : Any =tokenizer_p.from_pretrained(lowerCamelCase )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )

                shutil.rmtree(lowerCamelCase )

                # Save tokenizer rust, legacy_format=False
                lowerCamelCase__ : Optional[int] =tempfile.mkdtemp()

                lowerCamelCase__ : int =tokenizer_r.save_pretrained(lowerCamelCase, legacy_format=lowerCamelCase )
                lowerCamelCase__ : Dict =tokenizer_p.save_pretrained(lowerCamelCase )

                # Checks it saved the tokenizer.json file
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )

                # Checks everything loads correctly in the same way
                lowerCamelCase__ : Dict =tokenizer_r.from_pretrained(lowerCamelCase )
                lowerCamelCase__ : int =tokenizer_p.from_pretrained(lowerCamelCase )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )

                shutil.rmtree(lowerCamelCase )


@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Integration tests against the real facebook/mbart-large-en-ro checkpoint."""

    _a = 'facebook/mbart-large-en-ro'
    _a = [
        ' UN Chief Says There Is No Military Solution in Syria',
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    _a = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
        'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
    ]
    _a = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE]

    @classmethod
    def snake_case ( cls : List[Any] )-> Optional[int]:
        """Load the en_XX->ro_RO tokenizer once for the whole class."""
        lowerCamelCase__ : MBartTokenizer =MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang='''en_XX''', tgt_lang='''ro_RO''' )
        lowerCamelCase__ : Optional[int] =1
        return cls

    def snake_case ( self : Optional[Any] )-> List[str]:
        """Language-code tokens map to their fixed fairseq ids."""
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''], 25_0001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''], 25_0004 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''], 25_0020 )

    def snake_case ( self : Optional[int] )-> List[Any]:
        """Encoding the first source sentence reproduces the expected token ids."""
        lowerCamelCase__ : Union[str, Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, lowerCamelCase )

    def snake_case ( self : Optional[Any] )-> str:
        """Decoding with skip_special_tokens drops the language code and EOS."""
        self.assertIn(lowerCamelCase, self.tokenizer.all_special_ids )
        lowerCamelCase__ : Optional[int] =[RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
        lowerCamelCase__ : Any =self.tokenizer.decode(lowerCamelCase, skip_special_tokens=lowerCamelCase )
        lowerCamelCase__ : str =self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowerCamelCase )
        self.assertEqual(lowerCamelCase, lowerCamelCase )
        self.assertNotIn(self.tokenizer.eos_token, lowerCamelCase )

    def snake_case ( self : Tuple )-> int:
        """Truncation keeps EOS then the language code as the last two ids."""
        lowerCamelCase__ : Optional[int] =['''this is gunna be a long sentence ''' * 20]
        assert isinstance(src_text[0], lowerCamelCase )
        lowerCamelCase__ : Dict =10
        lowerCamelCase__ : Optional[int] =self.tokenizer(lowerCamelCase, max_length=lowerCamelCase, truncation=lowerCamelCase ).input_ids[0]
        self.assertEqual(ids[-2], 2 )
        self.assertEqual(ids[-1], lowerCamelCase )
        self.assertEqual(len(lowerCamelCase ), lowerCamelCase )

    def snake_case ( self : int )-> Any:
        """<mask> and ar_AR convert to their reserved ids."""
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ), [25_0026, 25_0001] )

    def snake_case ( self : Tuple )-> Optional[Any]:
        """Saving and reloading preserves the fairseq id mapping."""
        lowerCamelCase__ : int =tempfile.mkdtemp()
        lowerCamelCase__ : Optional[int] =self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(lowerCamelCase )
        lowerCamelCase__ : Optional[Any] =MBartTokenizer.from_pretrained(lowerCamelCase )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, lowerCamelCase )

    @require_torch
    def snake_case ( self : Optional[Any] )-> Tuple:
        """Batch encoding matches the fairseq reference layout (EOS + lang code placement)."""
        lowerCamelCase__ : Optional[Any] =self.tokenizer(self.src_text, text_target=self.tgt_text, padding=lowerCamelCase, return_tensors='''pt''' )
        lowerCamelCase__ : Dict =shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id )

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def snake_case ( self : Optional[Any] )-> Any:
        """Padded/truncated batch has the expected shapes and special-token bookkeeping."""
        lowerCamelCase__ : str =self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=lowerCamelCase,
            truncation=lowerCamelCase,
            max_length=len(self.expected_src_tokens ),
            return_tensors='''pt''',
        )
        lowerCamelCase__ : List[Any] =shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id )

        self.assertIsInstance(lowerCamelCase, lowerCamelCase )

        self.assertEqual((2, 14), batch.input_ids.shape )
        self.assertEqual((2, 14), batch.attention_mask.shape )
        lowerCamelCase__ : Any =batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, lowerCamelCase )
        self.assertEqual(2, batch.decoder_input_ids[0, -1] )  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [] )
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE] )

    def snake_case ( self : List[Any] )-> Dict:
        """Source and target can be truncated to different max lengths."""
        lowerCamelCase__ : Any =self.tokenizer(self.src_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=3, return_tensors='''pt''' )
        lowerCamelCase__ : Tuple =self.tokenizer(
            text_target=self.tgt_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=10, return_tensors='''pt''' )
        lowerCamelCase__ : Union[str, Any] =targets['''input_ids''']
        lowerCamelCase__ : List[Any] =shift_tokens_right(lowerCamelCase, self.tokenizer.pad_token_id )

        self.assertEqual(batch.input_ids.shape[1], 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1], 10 )

    @require_torch
    def snake_case ( self : Optional[int] )-> List[Any]:
        """_build_translation_inputs emits input ids, mask and forced BOS for the target language."""
        lowerCamelCase__ : str =self.tokenizer._build_translation_inputs(
            '''A test''', return_tensors='''pt''', src_lang='''en_XX''', tgt_lang='''ar_AR''' )

        self.assertEqual(
            nested_simplify(lowerCamelCase ),
            {
                # A, test, EOS, en_XX
                '''input_ids''': [[62, 3034, 2, 25_0004]],
                '''attention_mask''': [[1, 1, 1, 1]],
                # ar_AR
                '''forced_bos_token_id''': 25_0001,
            },
        )
625
1
"""FizzBuzz string builder."""


def snake_case__ ( number : int , iterations : int ) -> str:
    """Play FizzBuzz from ``number`` up to and including ``iterations``.

    Bug fix: the original signature declared the same parameter name twice
    (a ``SyntaxError``) and the body read unbound names; the two parameters
    are now distinct, matching the names the body already used.

    :param number: starting value, must be an int >= 1
    :param iterations: last value to play, must be an int >= 1
    :return: space-separated results, one token per value, with a trailing space
    :raises ValueError: on non-integer or out-of-range arguments
    """
    if not isinstance(iterations, int):
        raise ValueError('''iterations must be defined as integers''' )
    if not isinstance(number, int) or not number >= 1:
        raise ValueError(
            '''starting number must be and integer and be more than 0''' )
    if not iterations >= 1:
        raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' )

    out = ''''''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        # Multiples of neither 3 nor 5 print the number itself.
        if 0 not in (number % 3, number % 5):
            out += str(number)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
625
"""Reverse every sufficiently long word in a sentence."""


def snake_case__ ( sentence : str ) -> str:
    """Return *sentence* with every word longer than four characters reversed.

    Bug fixes: the original body split ``sentence`` (an unbound name, since the
    parameter was obfuscated to ``__lowerCamelCase``) and measured the length of
    the whole argument instead of each word; the ``__main__`` demo also called
    the undefined name ``reverse_long_words``. The per-word length test is what
    the demo output expects ("Hey wollef sroirraw" -> "Hey fellow warriors").

    :param sentence: whitespace-separated words
    :return: the transformed sentence, single-space joined
    """
    # word[::-1] already yields a str; the original's "".join() wrapper was redundant.
    return " ".join(word[::-1] if len(word ) > 4 else word for word in sentence.split() )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(snake_case__("Hey wollef sroirraw"))
625
1
"""Stable Diffusion inference on CPU with Intel Extension for PyTorch (IPEX).

NOTE(review): identifier obfuscation bound every top-level result to
``_lowercase`` while later statements read the original names (``parser``,
``args``, ``pipe``, ``sample``, ``timestep``, ``encoder_hidden_status``,
``generator``, ``generate_kwargs``, ``image`` ...), and ``torch.bfloataa``
presumably stood for ``torch.bfloat16`` — restore the bindings before running.
Only comments are changed here.
"""
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline

# CLI: choose scheduler and number of inference steps.
_lowercase : Union[str, Any] = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
_lowercase : int = parser.parse_args()

_lowercase : List[str] = "cpu"
_lowercase : str = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

# Load the user's fine-tuned pipeline, optionally swapping in the DPM solver.
_lowercase : Union[str, Any] = "path-to-your-trained-model"
_lowercase : Optional[Any] = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    _lowercase : Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_lowercase : Optional[Any] = pipe.to(device)

# to channels last
_lowercase : str = pipe.unet.to(memory_format=torch.channels_last)
_lowercase : List[str] = pipe.vae.to(memory_format=torch.channels_last)
_lowercase : Union[str, Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    _lowercase : List[Any] = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
# Dummy sample/timestep/hidden-state tensors serve as the tracing example input.
_lowercase : int = torch.randn(2, 4, 6_4, 6_4)
_lowercase : str = torch.rand(1) * 9_9_9
_lowercase : Optional[int] = torch.randn(2, 7_7, 7_6_8)
_lowercase : str = (sample, timestep, encoder_hidden_status)
try:
    _lowercase : Optional[int] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
    # Fall back to optimizing without a sample input if tracing fails.
    _lowercase : Dict = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
_lowercase : Optional[int] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
_lowercase : Optional[Any] = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
    _lowercase : str = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)

# compute
# Fixed seed for reproducible generation.
_lowercase : Tuple = 6_6_6
_lowercase : Any = torch.Generator(device).manual_seed(seed)
_lowercase : Union[str, Any] = {"generator": generator}
if args.steps is not None:
    _lowercase : Any = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
    _lowercase : Optional[Any] = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
625
"""Project Euler problem 63: powerful digit counts."""


def snake_case__ ( max_base : int = 10 , max_power : int = 22 ) -> int:
    """Count n-digit positive integers that are also an nth power.

    Bug fix: the original signature declared the same parameter name twice
    (a ``SyntaxError``) and the body read the unbound names ``bases`` and
    ``powers``; distinct parameters and explicit local bindings restore the
    intended logic. With the defaults this returns 49, the known answer to
    Project Euler problem 63.

    :param max_base: bases 1 .. max_base - 1 are considered (10 suffices: 10**n always has n+1 digits)
    :param max_power: powers 1 .. max_power - 1 are considered
    :return: the number of (base, power) pairs where base**power has exactly ``power`` digits
    """
    bases = range(1, max_base )
    powers = range(1, max_power )
    return sum(
        1 for power in powers for base in bases if len(str(base**power ) ) == power )


if __name__ == "__main__":
    print(f'{snake_case__(1_0, 2_2) = }')
625
1
"""Lazy-import scaffolding for the LiLT model (transformers-style __init__).

NOTE(review): obfuscation renamed the import-structure dict and the torch-only
module list to ``_lowercase``, but ``_LazyModule`` below still reads
``_import_structure`` — as written that name is unbound; presumably the dict
assigned first (and extended with the modeling symbols) was the import
structure. Only comments are changed here.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Config symbols are always importable.
_lowercase : Optional[int] = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}

# Modeling symbols are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase : List[str] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static analyzers see real imports; runtime uses the lazy module below.
    from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lilt import (
            LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
            LiltForQuestionAnswering,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltModel,
            LiltPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on attribute access.
    _lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
625
"""Tests for FlaxVisionTextDualEncoderModel (ViT/CLIP vision towers + BERT text tower).

NOTE(review): identifier obfuscation bound many locals to ``lowerCamelCase__``
while later statements read the original names (``model``, ``output``,
``batch_size`` ...), and the helper below returns an unbound ``x``; restore
the original bindings before running. Only comments/docstrings are changed here.
"""
import collections
import tempfile
import unittest

import numpy as np

from transformers.testing_utils import (
    is_pt_flax_cross_test,
    require_flax,
    require_torch,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available

from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester

if is_flax_available():
    from transformers import (
        FlaxBertModel,
        FlaxCLIPVisionModel,
        FlaxVisionTextDualEncoderModel,
        FlaxViTModel,
        VisionTextDualEncoderConfig,
        VisionTextDualEncoderProcessor,
    )
    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )

if is_torch_available():
    import torch

    from transformers import VisionTextDualEncoderModel

if is_vision_available():
    from PIL import Image


def snake_case__ ( __lowerCamelCase : List[Any] ):
    """Return the argument as a pair (presumably ``to_2tuple``: non-iterables are duplicated).

    NOTE(review): the body returns ``x``, which is unbound here — obfuscation
    artifact; the parameter was presumably named ``x`` originally.
    """
    if isinstance(__lowerCamelCase, collections.abc.Iterable ):
        return x
    return (x, x)


@require_flax
class __SCREAMING_SNAKE_CASE :
    """Mixin with shared checks for vision-text dual encoder models (Flax)."""

    def snake_case ( self : Dict, lowerCamelCase : List[str], lowerCamelCase : Any )-> Union[str, Any]:
        # Hook: subclasses build (vision_model, text_model); no-op here.
        pass

    def snake_case ( self : List[str] )-> List[str]:
        # Hook: subclasses prepare config and inputs; no-op here.
        pass

    def snake_case ( self : Optional[Any] )-> str:
        # Hook: no-op placeholder.
        pass

    def snake_case ( self : Union[str, Any], lowerCamelCase : np.ndarray, lowerCamelCase : np.ndarray, lowerCamelCase : float )-> Dict:
        """Assert max absolute difference between two arrays is within ``tol``."""
        lowerCamelCase__ : Union[str, Any] =np.abs((a - b) ).max()
        self.assertLessEqual(lowerCamelCase, lowerCamelCase, F'''Difference between torch and flax is {diff} (>= {tol}).''' )

    def snake_case ( self : Dict, lowerCamelCase : Tuple, lowerCamelCase : Any, lowerCamelCase : List[str], lowerCamelCase : Dict, lowerCamelCase : Any=None, **lowerCamelCase : str )-> int:
        """Build the dual encoder from sub-configs and check embedding shapes."""
        lowerCamelCase__ : List[str] =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : Tuple =FlaxVisionTextDualEncoderModel(lowerCamelCase )

        lowerCamelCase__ : Dict =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )

        self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], config.projection_dim) )
        self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], config.projection_dim) )

    def snake_case ( self : Any, lowerCamelCase : int, lowerCamelCase : Optional[Any], lowerCamelCase : List[str], lowerCamelCase : Union[str, Any], lowerCamelCase : str=None, **lowerCamelCase : List[Any] )-> int:
        """Build the dual encoder from two pretrained towers and check embedding shapes."""
        lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : Union[str, Any] ={'''vision_model''': vision_model, '''text_model''': text_model}
        lowerCamelCase__ : Tuple =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )

        lowerCamelCase__ : Optional[int] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )

        self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], model.config.projection_dim) )

    def snake_case ( self : Any, lowerCamelCase : Dict, lowerCamelCase : Dict, lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Dict=None, **lowerCamelCase : int )-> List[str]:
        """Save the model, reload it, and check outputs match within 1e-3."""
        lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : Optional[int] ={'''vision_model''': vision_model, '''text_model''': text_model}
        lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )

        lowerCamelCase__ : List[Any] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
        lowerCamelCase__ : int =output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(lowerCamelCase )
            lowerCamelCase__ : Dict =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase )

            lowerCamelCase__ : Optional[int] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
            lowerCamelCase__ : List[str] =after_output[0]

            lowerCamelCase__ : Any =np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(lowerCamelCase, 1E-3 )

    def snake_case ( self : Optional[Any], lowerCamelCase : Dict, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : List[Any]=None, **lowerCamelCase : List[Any] )-> Tuple:
        """Check attention tensors from both towers have the expected shapes."""
        lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : Any ={'''vision_model''': vision_model, '''text_model''': text_model}
        lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )

        lowerCamelCase__ : List[str] =model(
            input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase, output_attentions=lowerCamelCase )

        lowerCamelCase__ : int =output.vision_model_output.attentions
        self.assertEqual(len(lowerCamelCase ), vision_config.num_hidden_layers )

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        lowerCamelCase__ : Tuple =to_atuple(vision_model.config.image_size )
        lowerCamelCase__ : Optional[Any] =to_atuple(vision_model.config.patch_size )
        lowerCamelCase__ : Union[str, Any] =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        lowerCamelCase__ : int =num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len) )

        lowerCamelCase__ : List[Any] =output.text_model_output.attentions
        self.assertEqual(len(lowerCamelCase ), text_config.num_hidden_layers )

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def snake_case ( self : Tuple, lowerCamelCase : Optional[int], lowerCamelCase : Any, lowerCamelCase : Union[str, Any] )-> Any:
        """Run the same inputs through PT and Flax models (both directions via save/load) and compare."""
        pt_model.to(lowerCamelCase )
        pt_model.eval()

        # prepare inputs
        lowerCamelCase__ : Any =inputs_dict
        lowerCamelCase__ : Any ={k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}

        with torch.no_grad():
            lowerCamelCase__ : List[str] =pt_model(**lowerCamelCase ).to_tuple()

        lowerCamelCase__ : Optional[Any] =fx_model(**lowerCamelCase ).to_tuple()
        self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' )
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4] ):
            self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2 )

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(lowerCamelCase )
            lowerCamelCase__ : Optional[int] =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_pt=lowerCamelCase )

        lowerCamelCase__ : List[Any] =fx_model_loaded(**lowerCamelCase ).to_tuple()
        self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' )
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4] ):
            self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2 )

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(lowerCamelCase )
            lowerCamelCase__ : str =VisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_flax=lowerCamelCase )

        pt_model_loaded.to(lowerCamelCase )
        pt_model_loaded.eval()

        with torch.no_grad():
            lowerCamelCase__ : List[Any] =pt_model_loaded(**lowerCamelCase ).to_tuple()

        self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' )
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4] ):
            self.assert_almost_equals(lowerCamelCase, pt_output_loaded.numpy(), 4E-2 )

    def snake_case ( self : str, lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[Any], lowerCamelCase : str )-> List[Any]:
        """Convert a PT state dict to Flax params, then check PT/Flax equivalence."""
        lowerCamelCase__ : Any =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : List[Any] =VisionTextDualEncoderModel(lowerCamelCase )
        lowerCamelCase__ : List[str] =FlaxVisionTextDualEncoderModel(lowerCamelCase )

        lowerCamelCase__ : str =convert_pytorch_state_dict_to_flax(pt_model.state_dict(), lowerCamelCase )
        lowerCamelCase__ : Tuple =fx_state

        self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase )

    def snake_case ( self : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Union[str, Any] )-> Optional[int]:
        """Load Flax weights into a PT model, then check PT/Flax equivalence."""
        lowerCamelCase__ : Dict =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : Tuple =VisionTextDualEncoderModel(lowerCamelCase )
        lowerCamelCase__ : List[str] =FlaxVisionTextDualEncoderModel(lowerCamelCase )

        lowerCamelCase__ : Tuple =load_flax_weights_in_pytorch_model(lowerCamelCase, fx_model.params )

        self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase )

    def snake_case ( self : Optional[int] )-> Union[str, Any]:
        lowerCamelCase__ : Any =self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**lowerCamelCase )

    def snake_case ( self : Tuple )-> int:
        lowerCamelCase__ : int =self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**lowerCamelCase )

    def snake_case ( self : Tuple )-> Any:
        lowerCamelCase__ : Tuple =self.prepare_config_and_inputs()
        self.check_save_load(**lowerCamelCase )

    def snake_case ( self : str )-> Any:
        lowerCamelCase__ : str =self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**lowerCamelCase )

    @is_pt_flax_cross_test
    def snake_case ( self : Tuple )-> List[Any]:
        """Run both PT<->Flax equivalence checks from prepared config inputs."""
        lowerCamelCase__ : Union[str, Any] =self.prepare_config_and_inputs()
        lowerCamelCase__ : Union[str, Any] =config_inputs_dict.pop('''vision_config''' )
        lowerCamelCase__ : Optional[Any] =config_inputs_dict.pop('''text_config''' )
        lowerCamelCase__ : Tuple =config_inputs_dict

        self.check_equivalence_pt_to_flax(lowerCamelCase, lowerCamelCase, lowerCamelCase )
        self.check_equivalence_flax_to_pt(lowerCamelCase, lowerCamelCase, lowerCamelCase )

    @slow
    def snake_case ( self : Optional[Any] )-> Tuple:
        """Save a real pretrained model, reload, and compare outputs within 1e-5."""
        lowerCamelCase__ , lowerCamelCase__ : Dict =self.get_pretrained_model_and_inputs()

        lowerCamelCase__ : Optional[int] =model_a(**lowerCamelCase )
        lowerCamelCase__ : List[str] =outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_a.save_pretrained(lowerCamelCase )
            lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase )

            lowerCamelCase__ : Union[str, Any] =model_a(**lowerCamelCase )
            lowerCamelCase__ : List[Any] =after_outputs[0]
            lowerCamelCase__ : Any =np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(lowerCamelCase, 1E-5 )


@require_flax
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
    """Dual-encoder tests with a ViT vision tower and a BERT text tower."""

    def snake_case ( self : Optional[int] )-> Optional[Any]:
        """Load tiny ViT+BERT dual encoder and build random pixel/text inputs."""
        lowerCamelCase__ : str =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            '''hf-internal-testing/tiny-random-vit''',
            '''hf-internal-testing/tiny-bert''',
            vision_from_pt=lowerCamelCase,
            text_from_pt=lowerCamelCase,
        )
        lowerCamelCase__ : Union[str, Any] =13
        lowerCamelCase__ : List[str] =floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        lowerCamelCase__ : List[str] =ids_tensor([batch_size, 4], model.config.text_config.vocab_size )
        lowerCamelCase__ : Optional[int] =random_attention_mask([batch_size, 4] )
        lowerCamelCase__ : Any ={'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return model, inputs

    def snake_case ( self : str, lowerCamelCase : str, lowerCamelCase : int )-> int:
        lowerCamelCase__ : str =FlaxViTModel(lowerCamelCase )
        lowerCamelCase__ : Any =FlaxBertModel(lowerCamelCase )
        return vision_model, text_model

    def snake_case ( self : int )-> Optional[int]:
        """Combine ViT and BERT tester fixtures into one kwargs dict."""
        lowerCamelCase__ : Any =FlaxViTModelTester(self )
        lowerCamelCase__ : Union[str, Any] =FlaxBertModelTester(self )
        lowerCamelCase__ : Any =vit_model_tester.prepare_config_and_inputs()
        lowerCamelCase__ : Optional[Any] =bert_model_tester.prepare_config_and_inputs()

        lowerCamelCase__ , lowerCamelCase__ : Any =vision_config_and_inputs

        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple =text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
    """Dual-encoder tests with a CLIP vision tower and a BERT text tower."""

    def snake_case ( self : Optional[int] )-> Optional[int]:
        """Load tiny CLIP+BERT dual encoder and build random pixel/text inputs."""
        lowerCamelCase__ : Union[str, Any] =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            '''hf-internal-testing/tiny-random-clip''',
            '''hf-internal-testing/tiny-bert''',
            vision_from_pt=lowerCamelCase,
            text_from_pt=lowerCamelCase,
        )
        lowerCamelCase__ : Union[str, Any] =13
        lowerCamelCase__ : Optional[Any] =floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        lowerCamelCase__ : Union[str, Any] =ids_tensor([batch_size, 4], model.config.text_config.vocab_size )
        lowerCamelCase__ : str =random_attention_mask([batch_size, 4] )
        lowerCamelCase__ : Optional[int] ={'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return model, inputs

    def snake_case ( self : List[str], lowerCamelCase : Any, lowerCamelCase : Dict )-> Dict:
        lowerCamelCase__ : str =FlaxCLIPVisionModel(lowerCamelCase )
        lowerCamelCase__ : Optional[Any] =FlaxBertModel(lowerCamelCase )
        return vision_model, text_model

    def snake_case ( self : Optional[int] )-> Optional[Any]:
        """Combine CLIP and BERT tester fixtures into one kwargs dict."""
        lowerCamelCase__ : List[Any] =FlaxCLIPVisionModelTester(self )
        lowerCamelCase__ : List[Any] =FlaxBertModelTester(self )
        lowerCamelCase__ : Any =clip_model_tester.prepare_config_and_inputs()
        lowerCamelCase__ : Optional[int] =bert_model_tester.prepare_config_and_inputs()

        lowerCamelCase__ , lowerCamelCase__ : List[Any] =vision_config_and_inputs

        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] =text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_flax
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """End-to-end integration test against the public clip-italian checkpoint."""

    @slow
    def snake_case ( self : Tuple )-> Optional[Any]:
        """Run image/text scoring and verify logit shapes and reference values."""
        lowerCamelCase__ : Any =FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''', logit_scale_init_value=1.0 )
        lowerCamelCase__ : List[Any] =VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )

        lowerCamelCase__ : Optional[int] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        lowerCamelCase__ : Dict =processor(
            text=['''una foto di un gatto''', '''una foto di un cane'''], images=lowerCamelCase, padding=lowerCamelCase, return_tensors='''np''' )

        lowerCamelCase__ : List[Any] =model(**lowerCamelCase )

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        lowerCamelCase__ : Any =np.array([[1.2_284_727, 0.3_104_122]] )

        self.assertTrue(np.allclose(outputs.logits_per_image, lowerCamelCase, atol=1E-3 ) )
625
1
"""simple docstring""" def snake_case__ ( __lowerCamelCase : float , __lowerCamelCase : float ): """simple docstring""" if density <= 0: raise ValueError('''Impossible fluid density''' ) if bulk_modulus <= 0: raise ValueError('''Impossible bulk modulus''' ) return (bulk_modulus / density) ** 0.5 if __name__ == "__main__": import doctest doctest.testmod()
625
"""simple docstring""" def snake_case__ ( __lowerCamelCase : list , __lowerCamelCase : list , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int ): """simple docstring""" if index == number_of_items: return 0 lowerCamelCase__ : Optional[int] =0 lowerCamelCase__ : Union[str, Any] =0 lowerCamelCase__ : List[str] =knapsack(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , index + 1 ) if weights[index] <= max_weight: lowerCamelCase__ : Dict =values[index] + knapsack( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , max_weight - weights[index] , index + 1 ) return max(__lowerCamelCase , __lowerCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
625
1
"""simple docstring""" import numpy as np from cva import destroyAllWindows, imread, imshow, waitKey class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : int, lowerCamelCase : str, lowerCamelCase : int, lowerCamelCase : int )-> Optional[Any]: if dst_width < 0 or dst_height < 0: raise ValueError('''Destination width/height should be > 0''' ) lowerCamelCase__ : Optional[int] =img lowerCamelCase__ : Dict =img.shape[1] lowerCamelCase__ : List[str] =img.shape[0] lowerCamelCase__ : Any =dst_width lowerCamelCase__ : Tuple =dst_height lowerCamelCase__ : Union[str, Any] =self.src_w / self.dst_w lowerCamelCase__ : Tuple =self.src_h / self.dst_h lowerCamelCase__ : List[str] =( np.ones((self.dst_h, self.dst_w, 3), np.uinta ) * 255 ) def snake_case ( self : Optional[Any] )-> int: for i in range(self.dst_h ): for j in range(self.dst_w ): lowerCamelCase__ : List[Any] =self.img[self.get_y(lowerCamelCase )][self.get_x(lowerCamelCase )] def snake_case ( self : Union[str, Any], lowerCamelCase : int )-> int: return int(self.ratio_x * x ) def snake_case ( self : Union[str, Any], lowerCamelCase : int )-> int: return int(self.ratio_y * y ) if __name__ == "__main__": _lowercase , _lowercase : int = 8_0_0, 6_0_0 _lowercase : str = imread("image_data/lena.jpg", 1) _lowercase : str = NearestNeighbour(im, dst_w, dst_h) n.process() imshow( f'Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}', n.output ) waitKey(0) destroyAllWindows()
625
"""simple docstring""" _lowercase : Optional[Any] = { "Pillow": "Pillow<10.0.0", "accelerate": "accelerate>=0.20.3", "av": "av==9.2.0", "beautifulsoup4": "beautifulsoup4", "black": "black~=23.1", "codecarbon": "codecarbon==1.2.0", "cookiecutter": "cookiecutter==1.7.3", "dataclasses": "dataclasses", "datasets": "datasets!=2.5.0", "decord": "decord==0.6.0", "deepspeed": "deepspeed>=0.9.3", "diffusers": "diffusers", "dill": "dill<0.3.5", "evaluate": "evaluate>=0.2.0", "fairscale": "fairscale>0.3", "faiss-cpu": "faiss-cpu", "fastapi": "fastapi", "filelock": "filelock", "flax": "flax>=0.4.1,<=0.7.0", "ftfy": "ftfy", "fugashi": "fugashi>=1.0", "GitPython": "GitPython<3.1.19", "hf-doc-builder": "hf-doc-builder>=0.3.0", "huggingface-hub": "huggingface-hub>=0.14.1,<1.0", "importlib_metadata": "importlib_metadata", "ipadic": "ipadic>=1.0.0,<2.0", "isort": "isort>=5.5.4", "jax": "jax>=0.2.8,!=0.3.2,<=0.4.13", "jaxlib": "jaxlib>=0.1.65,<=0.4.13", "jieba": "jieba", "kenlm": "kenlm", "keras-nlp": "keras-nlp>=0.3.1", "librosa": "librosa", "nltk": "nltk", "natten": "natten>=0.14.6", "numpy": "numpy>=1.17", "onnxconverter-common": "onnxconverter-common", "onnxruntime-tools": "onnxruntime-tools>=1.4.2", "onnxruntime": "onnxruntime>=1.4.0", "opencv-python": "opencv-python", "optuna": "optuna", "optax": "optax>=0.0.8,<=0.1.4", "packaging": "packaging>=20.0", "parameterized": "parameterized", "phonemizer": "phonemizer", "protobuf": "protobuf", "psutil": "psutil", "pyyaml": "pyyaml>=5.1", "pydantic": "pydantic<2", "pytest": "pytest>=7.2.0", "pytest-timeout": "pytest-timeout", "pytest-xdist": "pytest-xdist", "python": "python>=3.8.0", "ray[tune]": "ray[tune]", "regex": "regex!=2019.12.17", "requests": "requests", "rhoknp": "rhoknp>=1.1.0,<1.3.1", "rjieba": "rjieba", "rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1", "ruff": "ruff>=0.0.241,<=0.0.259", "sacrebleu": "sacrebleu>=1.4.12,<2.0.0", "sacremoses": "sacremoses", "safetensors": "safetensors>=0.3.1", "sagemaker": 
"sagemaker>=2.31.0", "scikit-learn": "scikit-learn", "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92", "sigopt": "sigopt", "starlette": "starlette", "sudachipy": "sudachipy>=0.6.6", "sudachidict_core": "sudachidict_core>=20220729", "tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14", "tensorflow": "tensorflow>=2.6,<2.14", "tensorflow-text": "tensorflow-text<2.14", "tf2onnx": "tf2onnx", "timeout-decorator": "timeout-decorator", "timm": "timm", "tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14", "torch": "torch>=1.9,!=1.12.0", "torchaudio": "torchaudio", "torchvision": "torchvision", "pyctcdecode": "pyctcdecode>=0.4.0", "tqdm": "tqdm>=4.27", "unidic": "unidic>=1.0.2", "unidic_lite": "unidic_lite>=1.0.7", "urllib3": "urllib3<2.0.0", "uvicorn": "uvicorn", }
625
1
"""simple docstring""" import colorsys from PIL import Image # type: ignore def snake_case__ ( __lowerCamelCase : float , __lowerCamelCase : float , __lowerCamelCase : int ): """simple docstring""" lowerCamelCase__ : Optional[Any] =x lowerCamelCase__ : Any =y for step in range(__lowerCamelCase ): # noqa: B007 lowerCamelCase__ : List[Any] =a * a - b * b + x lowerCamelCase__ : Optional[int] =2 * a * b + y lowerCamelCase__ : Union[str, Any] =a_new # divergence happens for all complex number with an absolute value # greater than 4 if a * a + b * b > 4: break return step / (max_step - 1) def snake_case__ ( __lowerCamelCase : float ): """simple docstring""" if distance == 1: return (0, 0, 0) else: return (255, 255, 255) def snake_case__ ( __lowerCamelCase : float ): """simple docstring""" if distance == 1: return (0, 0, 0) else: return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(__lowerCamelCase , 1 , 1 ) ) def snake_case__ ( __lowerCamelCase : int = 800 , __lowerCamelCase : int = 600 , __lowerCamelCase : float = -0.6 , __lowerCamelCase : float = 0 , __lowerCamelCase : float = 3.2 , __lowerCamelCase : int = 50 , __lowerCamelCase : bool = True , ): """simple docstring""" lowerCamelCase__ : Optional[Any] =Image.new('''RGB''' , (image_width, image_height) ) lowerCamelCase__ : Optional[int] =img.load() # loop through the image-coordinates for image_x in range(__lowerCamelCase ): for image_y in range(__lowerCamelCase ): # determine the figure-coordinates based on the image-coordinates lowerCamelCase__ : Optional[Any] =figure_width / image_width * image_height lowerCamelCase__ : Dict =figure_center_x + (image_x / image_width - 0.5) * figure_width lowerCamelCase__ : Optional[int] =figure_center_y + (image_y / image_height - 0.5) * figure_height lowerCamelCase__ : Any =get_distance(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # color the corresponding pixel based on the selected coloring-function if use_distance_color_coding: lowerCamelCase__ : int 
=get_color_coded_rgb(__lowerCamelCase ) else: lowerCamelCase__ : Optional[int] =get_black_and_white_rgb(__lowerCamelCase ) return img if __name__ == "__main__": import doctest doctest.testmod() # colored version, full figure _lowercase : Optional[Any] = get_image() # uncomment for colored version, different section, zoomed in # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4, # figure_width = 0.8) # uncomment for black and white version, full figure # img = get_image(use_distance_color_coding = False) # uncomment to save the image # img.save("mandelbrot.png") img.show()
625
"""simple docstring""" def snake_case__ ( __lowerCamelCase : list[int] ): """simple docstring""" if not numbers: return 0 if not isinstance(__lowerCamelCase , (list, tuple) ) or not all( isinstance(__lowerCamelCase , __lowerCamelCase ) for number in numbers ): raise ValueError('''numbers must be an iterable of integers''' ) lowerCamelCase__ : Any =numbers[0] for i in range(1 , len(__lowerCamelCase ) ): # update the maximum and minimum subarray products lowerCamelCase__ : Dict =numbers[i] if number < 0: lowerCamelCase__ , lowerCamelCase__ : List[Any] =min_till_now, max_till_now lowerCamelCase__ : Optional[int] =max(__lowerCamelCase , max_till_now * number ) lowerCamelCase__ : Dict =min(__lowerCamelCase , min_till_now * number ) # update the maximum product found till now lowerCamelCase__ : Tuple =max(__lowerCamelCase , __lowerCamelCase ) return max_prod
625
1
"""simple docstring""" import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def snake_case__ ( __lowerCamelCase : Any ): """simple docstring""" lowerCamelCase__ : Union[str, Any] =os.path.join(args.tf_model_dir , '''parameters.json''' ) lowerCamelCase__ : Any =json.loads(open(__lowerCamelCase ).read() ) if not params: raise ValueError( f'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' ) if not args.output.endswith('''.pt''' ): lowerCamelCase__ : List[str] =args.output + '''.pt''' lowerCamelCase__ : Tuple =OrderedDict() with tf.device('''/CPU:0''' ): lowerCamelCase__ : Union[str, Any] =tf.train.load_checkpoint(args.tf_model_dir ) lowerCamelCase__ : List[str] =reader.get_variable_to_shape_map() for key_name in shapes.keys(): lowerCamelCase__ : int =reader.get_tensor(__lowerCamelCase ).astype(np.floataa ) if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ): continue if key_name.startswith('''pasts/''' ): if key_name.startswith('''pasts/mlp''' ): lowerCamelCase__ : Dict =int(key_name[9] ) elif key_name.startswith('''pasts/out''' ): lowerCamelCase__ : int =8 lowerCamelCase__ : Any ='''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time lowerCamelCase__ : Optional[Any] =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowerCamelCase__ : Tuple =torch.tensor(__lowerCamelCase ) elif key_name.startswith('''model/moe''' ): lowerCamelCase__ : List[Any] =int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/switch_gating/kernel''' ): lowerCamelCase__ : Any ='''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player lowerCamelCase__ : List[str] =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowerCamelCase__ : Any =torch.tensor(__lowerCamelCase ) elif key_name.endswith('''/softmlp/kernel''' ): lowerCamelCase__ : Optional[Any] 
='''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player lowerCamelCase__ : Optional[int] =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowerCamelCase__ : int =torch.tensor(__lowerCamelCase ) elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ): lowerCamelCase__ : List[Any] =key_name[-9:-7] for i in range(16 ): lowerCamelCase__ : Optional[int] ='''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer) lowerCamelCase__ : List[str] =( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided lowerCamelCase__ : str =torch.tensor(__lowerCamelCase ) elif key_name.startswith('''model/mlp''' ): lowerCamelCase__ : Union[str, Any] =int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/p1/kernel''' ): lowerCamelCase__ : List[str] ='''model.blocks.%d.feed_forward.mlp.wi.weight''' % player lowerCamelCase__ : Dict =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowerCamelCase__ : Optional[Any] =torch.tensor(__lowerCamelCase ) elif key_name.endswith('''/p1/bias''' ): lowerCamelCase__ : int ='''model.blocks.%d.feed_forward.mlp.wi.bias''' % player lowerCamelCase__ : Optional[int] =vnp.copy() # same because it is one dimensional lowerCamelCase__ : Optional[int] =torch.tensor(__lowerCamelCase ) elif key_name.endswith('''/p2/kernel''' ): lowerCamelCase__ : str ='''model.blocks.%d.feed_forward.mlp.wo.weight''' % player lowerCamelCase__ : Optional[int] =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowerCamelCase__ : Dict =torch.tensor(__lowerCamelCase ) elif key_name.endswith('''/p2/bias''' ): lowerCamelCase__ : List[str] ='''model.blocks.%d.feed_forward.mlp.wo.bias''' % player lowerCamelCase__ : Tuple =vnp.copy() # same because it is one dimensional lowerCamelCase__ : Any =torch.tensor(__lowerCamelCase ) elif key_name.startswith('''model/ln''' ): lowerCamelCase__ : Dict =int(key_name[8:].split('''/''' 
)[0] ) if key_name.endswith('''/b''' ): lowerCamelCase__ : Tuple ='''model.blocks.%d.feed_forward.norm.bias''' % player lowerCamelCase__ : Dict =vnp.copy() # same because it is one dimensional lowerCamelCase__ : str =torch.tensor(__lowerCamelCase ) elif key_name.endswith('''/g''' ): lowerCamelCase__ : Any ='''model.blocks.%d.feed_forward.norm.weight''' % player lowerCamelCase__ : List[Any] =vnp.copy() # same because it is one dimensional lowerCamelCase__ : Optional[Any] =torch.tensor(__lowerCamelCase ) elif key_name.startswith('''model/att''' ): lowerCamelCase__ : Any =int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/qkv/kernel''' ): lowerCamelCase__ : List[Any] =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum lowerCamelCase__ : Optional[Any] =state[:, 0, :, :] lowerCamelCase__ : Dict =state[:, 1, :, :] lowerCamelCase__ : Union[str, Any] =state[:, 2, :, :] lowerCamelCase__ : int =( state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowerCamelCase__ : List[str] =( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowerCamelCase__ : Union[str, Any] =( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowerCamelCase__ : Optional[Any] ='''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player lowerCamelCase__ : int =torch.tensor(__lowerCamelCase ) lowerCamelCase__ : Optional[Any] ='''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player lowerCamelCase__ : List[str] =torch.tensor(__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] ='''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player lowerCamelCase__ : Dict =torch.tensor(__lowerCamelCase ) elif key_name.endswith('''/o/kernel''' ): lowerCamelCase__ : List[str] 
='''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player lowerCamelCase__ : Any =( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix lowerCamelCase__ : List[str] =torch.tensor(__lowerCamelCase ) elif key_name.startswith('''model/an''' ): lowerCamelCase__ : Optional[Any] =int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): lowerCamelCase__ : Dict ='''model.blocks.%d.self_attn.norm.bias''' % player lowerCamelCase__ : str =vnp.copy() # same because it is one dimensional lowerCamelCase__ : Union[str, Any] =torch.tensor(__lowerCamelCase ) elif key_name.endswith('''/g''' ): lowerCamelCase__ : Union[str, Any] ='''model.blocks.%d.self_attn.norm.weight''' % player lowerCamelCase__ : List[Any] =vnp.copy() # same because it is one dimensional lowerCamelCase__ : Any =torch.tensor(__lowerCamelCase ) elif ( key_name.startswith('''model/wte''' ) or key_name.startswith('''model/wpe''' ) or key_name.startswith('''model/ete''' ) ): lowerCamelCase__ : Union[str, Any] ={'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[ key_name[-3:] ] lowerCamelCase__ : int ='''model.%s.weight''' % nlayer lowerCamelCase__ : List[Any] =vnp.copy() # same in embedded lowerCamelCase__ : Dict =torch.tensor(__lowerCamelCase ) if key_name.startswith('''model/wte''' ): lowerCamelCase__ : Union[str, Any] ='''lm_head.weight''' lowerCamelCase__ : int =vnp.copy() # same in embedded lowerCamelCase__ : Any =torch.tensor(__lowerCamelCase ) elif key_name.startswith('''model/wob''' ): lowerCamelCase__ : List[Any] ='''final_logits_bias''' lowerCamelCase__ : List[Any] =vnp.copy() # same in embedded lowerCamelCase__ : List[str] =state.reshape((1, -1) ) lowerCamelCase__ : Optional[Any] =torch.tensor(__lowerCamelCase ) elif key_name == "model/dense/kernel": lowerCamelCase__ : str ='''model.last_project.weight''' lowerCamelCase__ : int =vnp.transpose([1, 0] 
).copy() # Mesh-Tensorflow is a diagonal matrix lowerCamelCase__ : Any =torch.tensor(__lowerCamelCase ) elif key_name == "model/dense_1/bias": lowerCamelCase__ : str ='''model.last_project.bias''' lowerCamelCase__ : Tuple =vnp.copy() # same because it is one dimensional lowerCamelCase__ : str =torch.tensor(__lowerCamelCase ) torch.save(__lowerCamelCase , args.output ) if __name__ == "__main__": _lowercase : Union[str, Any] = argparse.ArgumentParser( description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model") parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model") _lowercase : List[Any] = parser.parse_args() convert_tf_gptsan_to_pt(args)
625
"""simple docstring""" from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, ) @flax.struct.dataclass class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' _a = 42 _a = 42 class __SCREAMING_SNAKE_CASE ( nn.Module ): '''simple docstring''' _a = 42 _a = (1_6, 3_2, 9_6, 2_5_6) _a = jnp.floataa def snake_case ( self : Tuple )-> int: lowerCamelCase__ : Tuple =nn.Conv( self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, ) lowerCamelCase__ : Dict =[] for i in range(len(self.block_out_channels ) - 1 ): lowerCamelCase__ : Dict =self.block_out_channels[i] lowerCamelCase__ : Dict =self.block_out_channels[i + 1] lowerCamelCase__ : List[str] =nn.Conv( lowerCamelCase, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, ) blocks.append(lowerCamelCase ) lowerCamelCase__ : Optional[int] =nn.Conv( lowerCamelCase, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, ) blocks.append(lowerCamelCase ) lowerCamelCase__ : Any =blocks lowerCamelCase__ : Optional[int] =nn.Conv( self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) def __call__( self : Any, lowerCamelCase : int )-> List[str]: lowerCamelCase__ : Tuple =self.conv_in(lowerCamelCase ) lowerCamelCase__ : Dict =nn.silu(lowerCamelCase ) for block in self.blocks: lowerCamelCase__ : str =block(lowerCamelCase ) lowerCamelCase__ : List[str] =nn.silu(lowerCamelCase ) lowerCamelCase__ : Any 
=self.conv_out(lowerCamelCase ) return embedding @flax_register_to_config class __SCREAMING_SNAKE_CASE ( nn.Module , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' _a = 3_2 _a = 4 _a = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) _a = False _a = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0) _a = 2 _a = 8 _a = None _a = 1_2_8_0 _a = 0.0 _a = False _a = jnp.floataa _a = True _a = 0 _a = "rgb" _a = (1_6, 3_2, 9_6, 2_5_6) def snake_case ( self : str, lowerCamelCase : jax.random.KeyArray )-> FrozenDict: # init input tensors lowerCamelCase__ : int =(1, self.in_channels, self.sample_size, self.sample_size) lowerCamelCase__ : int =jnp.zeros(lowerCamelCase, dtype=jnp.floataa ) lowerCamelCase__ : Union[str, Any] =jnp.ones((1,), dtype=jnp.intaa ) lowerCamelCase__ : str =jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.floataa ) lowerCamelCase__ : Any =(1, 3, self.sample_size * 8, self.sample_size * 8) lowerCamelCase__ : Optional[Any] =jnp.zeros(lowerCamelCase, dtype=jnp.floataa ) lowerCamelCase__ , lowerCamelCase__ : List[Any] =jax.random.split(lowerCamelCase ) lowerCamelCase__ : Dict ={'''params''': params_rng, '''dropout''': dropout_rng} return self.init(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )["params"] def snake_case ( self : Any )-> Tuple: lowerCamelCase__ : Optional[int] =self.block_out_channels lowerCamelCase__ : Tuple =block_out_channels[0] * 4 # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. 
The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. lowerCamelCase__ : List[Any] =self.num_attention_heads or self.attention_head_dim # input lowerCamelCase__ : int =nn.Conv( block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, ) # time lowerCamelCase__ : str =FlaxTimesteps( block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift ) lowerCamelCase__ : Dict =FlaxTimestepEmbedding(lowerCamelCase, dtype=self.dtype ) lowerCamelCase__ : List[Any] =FlaxControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels, ) lowerCamelCase__ : Dict =self.only_cross_attention if isinstance(lowerCamelCase, lowerCamelCase ): lowerCamelCase__ : int =(only_cross_attention,) * len(self.down_block_types ) if isinstance(lowerCamelCase, lowerCamelCase ): lowerCamelCase__ : List[str] =(num_attention_heads,) * len(self.down_block_types ) # down lowerCamelCase__ : Union[str, Any] =[] lowerCamelCase__ : Dict =[] lowerCamelCase__ : List[Any] =block_out_channels[0] lowerCamelCase__ : List[Any] =nn.Conv( lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) controlnet_down_blocks.append(lowerCamelCase ) for i, down_block_type in enumerate(self.down_block_types ): lowerCamelCase__ : List[Any] =output_channel lowerCamelCase__ : str =block_out_channels[i] lowerCamelCase__ : Dict =i == len(lowerCamelCase ) - 1 if down_block_type == "CrossAttnDownBlock2D": lowerCamelCase__ : str =FlaxCrossAttnDownBlockaD( in_channels=lowerCamelCase, out_channels=lowerCamelCase, dropout=self.dropout, 
num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype, ) else: lowerCamelCase__ : List[Any] =FlaxDownBlockaD( in_channels=lowerCamelCase, out_channels=lowerCamelCase, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, ) down_blocks.append(lowerCamelCase ) for _ in range(self.layers_per_block ): lowerCamelCase__ : Any =nn.Conv( lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) controlnet_down_blocks.append(lowerCamelCase ) if not is_final_block: lowerCamelCase__ : Any =nn.Conv( lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) controlnet_down_blocks.append(lowerCamelCase ) lowerCamelCase__ : int =down_blocks lowerCamelCase__ : List[str] =controlnet_down_blocks # mid lowerCamelCase__ : Tuple =block_out_channels[-1] lowerCamelCase__ : List[Any] =FlaxUNetMidBlockaDCrossAttn( in_channels=lowerCamelCase, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype, ) lowerCamelCase__ : List[str] =nn.Conv( lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) def __call__( self : int, lowerCamelCase : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : str, lowerCamelCase : float = 1.0, lowerCamelCase : bool = True, lowerCamelCase : bool = False, )-> Union[FlaxControlNetOutput, Tuple]: lowerCamelCase__ : int =self.controlnet_conditioning_channel_order if channel_order == "bgr": lowerCamelCase__ : int 
=jnp.flip(lowerCamelCase, axis=1 ) # 1. time if not isinstance(lowerCamelCase, jnp.ndarray ): lowerCamelCase__ : Any =jnp.array([timesteps], dtype=jnp.intaa ) elif isinstance(lowerCamelCase, jnp.ndarray ) and len(timesteps.shape ) == 0: lowerCamelCase__ : List[str] =timesteps.astype(dtype=jnp.floataa ) lowerCamelCase__ : int =jnp.expand_dims(lowerCamelCase, 0 ) lowerCamelCase__ : Optional[Any] =self.time_proj(lowerCamelCase ) lowerCamelCase__ : Optional[Any] =self.time_embedding(lowerCamelCase ) # 2. pre-process lowerCamelCase__ : Optional[int] =jnp.transpose(lowerCamelCase, (0, 2, 3, 1) ) lowerCamelCase__ : Dict =self.conv_in(lowerCamelCase ) lowerCamelCase__ : List[str] =jnp.transpose(lowerCamelCase, (0, 2, 3, 1) ) lowerCamelCase__ : int =self.controlnet_cond_embedding(lowerCamelCase ) sample += controlnet_cond # 3. down lowerCamelCase__ : Union[str, Any] =(sample,) for down_block in self.down_blocks: if isinstance(lowerCamelCase, lowerCamelCase ): lowerCamelCase__ , lowerCamelCase__ : Dict =down_block(lowerCamelCase, lowerCamelCase, lowerCamelCase, deterministic=not train ) else: lowerCamelCase__ , lowerCamelCase__ : Tuple =down_block(lowerCamelCase, lowerCamelCase, deterministic=not train ) down_block_res_samples += res_samples # 4. mid lowerCamelCase__ : Optional[int] =self.mid_block(lowerCamelCase, lowerCamelCase, lowerCamelCase, deterministic=not train ) # 5. contronet blocks lowerCamelCase__ : Optional[Any] =() for down_block_res_sample, controlnet_block in zip(lowerCamelCase, self.controlnet_down_blocks ): lowerCamelCase__ : Union[str, Any] =controlnet_block(lowerCamelCase ) controlnet_down_block_res_samples += (down_block_res_sample,) lowerCamelCase__ : List[str] =controlnet_down_block_res_samples lowerCamelCase__ : List[str] =self.controlnet_mid_block(lowerCamelCase ) # 6. 
scaling lowerCamelCase__ : Union[str, Any] =[sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample *= conditioning_scale if not return_dict: return (down_block_res_samples, mid_block_res_sample) return FlaxControlNetOutput( down_block_res_samples=lowerCamelCase, mid_block_res_sample=lowerCamelCase )
625
1
"""simple docstring""" def snake_case__ ( __lowerCamelCase : list ): """simple docstring""" for i in range(len(__lowerCamelCase ) - 1 , 0 , -1 ): lowerCamelCase__ : int =False for j in range(__lowerCamelCase , 0 , -1 ): if unsorted[j] < unsorted[j - 1]: lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =unsorted[j - 1], unsorted[j] lowerCamelCase__ : List[Any] =True for j in range(__lowerCamelCase ): if unsorted[j] > unsorted[j + 1]: lowerCamelCase__ , lowerCamelCase__ : Tuple =unsorted[j + 1], unsorted[j] lowerCamelCase__ : List[str] =True if not swapped: break return unsorted if __name__ == "__main__": import doctest doctest.testmod() _lowercase : Tuple = input("Enter numbers separated by a comma:\n").strip() _lowercase : Dict = [int(item) for item in user_input.split(",")] print(f'{cocktail_shaker_sort(unsorted) = }')
625
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) _lowercase : Optional[Any] = { "configuration_clip": [ "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "CLIPConfig", "CLIPOnnxConfig", "CLIPTextConfig", "CLIPVisionConfig", ], "processing_clip": ["CLIPProcessor"], "tokenization_clip": ["CLIPTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : str = ["CLIPTokenizerFast"] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Any = ["CLIPFeatureExtractor"] _lowercase : int = ["CLIPImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Optional[Any] = [ "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "CLIPModel", "CLIPPreTrainedModel", "CLIPTextModel", "CLIPTextModelWithProjection", "CLIPVisionModel", "CLIPVisionModelWithProjection", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Dict = [ "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "TFCLIPModel", "TFCLIPPreTrainedModel", "TFCLIPTextModel", "TFCLIPVisionModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Union[str, Any] = [ "FlaxCLIPModel", "FlaxCLIPPreTrainedModel", "FlaxCLIPTextModel", "FlaxCLIPTextPreTrainedModel", "FlaxCLIPVisionModel", "FlaxCLIPVisionPreTrainedModel", ] if TYPE_CHECKING: from .configuration_clip import ( CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPConfig, CLIPOnnxConfig, CLIPTextConfig, CLIPVisionConfig, ) from .processing_clip import CLIPProcessor from .tokenization_clip import CLIPTokenizer 
try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_clip_fast import CLIPTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clip import CLIPFeatureExtractor from .image_processing_clip import CLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clip import ( CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPModel, CLIPPreTrainedModel, CLIPTextModel, CLIPTextModelWithProjection, CLIPVisionModel, CLIPVisionModelWithProjection, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_clip import ( TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFCLIPModel, TFCLIPPreTrainedModel, TFCLIPTextModel, TFCLIPVisionModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_clip import ( FlaxCLIPModel, FlaxCLIPPreTrainedModel, FlaxCLIPTextModel, FlaxCLIPTextPreTrainedModel, FlaxCLIPVisionModel, FlaxCLIPVisionPreTrainedModel, ) else: import sys _lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
625
1
"""simple docstring""" from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging _lowercase : Optional[Any] = logging.get_logger(__name__) _lowercase : Any = { "huggingface/informer-tourism-monthly": ( "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json" ), # See all Informer models at https://huggingface.co/models?filter=informer } class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' _a = 'informer' _a = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', 'num_hidden_layers': 'encoder_layers', } def __init__( self : str, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : str = "student_t", lowerCamelCase : str = "nll", lowerCamelCase : int = 1, lowerCamelCase : List[int] = None, lowerCamelCase : Optional[Union[str, bool]] = "mean", lowerCamelCase : int = 0, lowerCamelCase : int = 0, lowerCamelCase : int = 0, lowerCamelCase : int = 0, lowerCamelCase : Optional[List[int]] = None, lowerCamelCase : Optional[List[int]] = None, lowerCamelCase : int = 64, lowerCamelCase : int = 32, lowerCamelCase : int = 32, lowerCamelCase : int = 2, lowerCamelCase : int = 2, lowerCamelCase : int = 2, lowerCamelCase : int = 2, lowerCamelCase : bool = True, lowerCamelCase : str = "gelu", lowerCamelCase : float = 0.05, lowerCamelCase : float = 0.1, lowerCamelCase : float = 0.1, lowerCamelCase : float = 0.1, lowerCamelCase : float = 0.1, lowerCamelCase : int = 100, lowerCamelCase : float = 0.02, lowerCamelCase : Optional[int]=True, lowerCamelCase : str = "prob", lowerCamelCase : int = 5, lowerCamelCase : bool = True, **lowerCamelCase : Union[str, Any], )-> Optional[Any]: # time series specific configuration lowerCamelCase__ : List[Any] =prediction_length lowerCamelCase__ : List[str] =context_length or prediction_length lowerCamelCase__ : Union[str, Any] =distribution_output lowerCamelCase__ : int =loss 
lowerCamelCase__ : List[Any] =input_size lowerCamelCase__ : Dict =num_time_features lowerCamelCase__ : Any =lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7] lowerCamelCase__ : Optional[int] =scaling lowerCamelCase__ : List[str] =num_dynamic_real_features lowerCamelCase__ : Optional[int] =num_static_real_features lowerCamelCase__ : Any =num_static_categorical_features # set cardinality if cardinality and num_static_categorical_features > 0: if len(lowerCamelCase ) != num_static_categorical_features: raise ValueError( '''The cardinality should be a list of the same length as `num_static_categorical_features`''' ) lowerCamelCase__ : Tuple =cardinality else: lowerCamelCase__ : Dict =[0] # set embedding_dimension if embedding_dimension and num_static_categorical_features > 0: if len(lowerCamelCase ) != num_static_categorical_features: raise ValueError( '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' ) lowerCamelCase__ : List[Any] =embedding_dimension else: lowerCamelCase__ : Dict =[min(50, (cat + 1) // 2 ) for cat in self.cardinality] lowerCamelCase__ : Union[str, Any] =num_parallel_samples # Transformer architecture configuration lowerCamelCase__ : Optional[int] =input_size * len(self.lags_sequence ) + self._number_of_features lowerCamelCase__ : Any =d_model lowerCamelCase__ : str =encoder_attention_heads lowerCamelCase__ : int =decoder_attention_heads lowerCamelCase__ : Optional[int] =encoder_ffn_dim lowerCamelCase__ : Dict =decoder_ffn_dim lowerCamelCase__ : Union[str, Any] =encoder_layers lowerCamelCase__ : Optional[Any] =decoder_layers lowerCamelCase__ : List[Any] =dropout lowerCamelCase__ : int =attention_dropout lowerCamelCase__ : Dict =activation_dropout lowerCamelCase__ : Dict =encoder_layerdrop lowerCamelCase__ : str =decoder_layerdrop lowerCamelCase__ : Optional[int] =activation_function lowerCamelCase__ : int =init_std lowerCamelCase__ : Union[str, Any] =use_cache # Informer 
lowerCamelCase__ : Union[str, Any] =attention_type lowerCamelCase__ : Optional[Any] =sampling_factor lowerCamelCase__ : Optional[Any] =distil super().__init__(is_encoder_decoder=lowerCamelCase, **lowerCamelCase ) @property def snake_case ( self : int )-> int: return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
625
"""simple docstring""" import os def snake_case__ ( ): """simple docstring""" with open(os.path.dirname(__lowerCamelCase ) + '''/p022_names.txt''' ) as file: lowerCamelCase__ : Tuple =str(file.readlines()[0] ) lowerCamelCase__ : int =names.replace('''"''' , '''''' ).split(''',''' ) names.sort() lowerCamelCase__ : Union[str, Any] =0 lowerCamelCase__ : str =0 for i, name in enumerate(__lowerCamelCase ): for letter in name: name_score += ord(__lowerCamelCase ) - 64 total_score += (i + 1) * name_score lowerCamelCase__ : Dict =0 return total_score if __name__ == "__main__": print(solution())
625
1
"""simple docstring""" def snake_case__ ( __lowerCamelCase : int ): """simple docstring""" if upper_limit < 0: raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' ) lowerCamelCase__ : List[str] =[0] * (upper_limit + 1) # Base case: C(0) = C(1) = 1 lowerCamelCase__ : str =1 if upper_limit > 0: lowerCamelCase__ : Optional[int] =1 # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i for i in range(2 , upper_limit + 1 ): for j in range(__lowerCamelCase ): catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1] return catalan_list if __name__ == "__main__": print("\n********* Catalan Numbers Using Dynamic Programming ************\n") print("\n*** Enter -1 at any time to quit ***") print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="") try: while True: _lowercase : List[Any] = int(input().strip()) if N < 0: print("\n********* Goodbye!! ************") break else: print(f'The Catalan numbers from 0 through {N} are:') print(catalan_numbers(N)) print("Try another upper limit for the sequence: ", end="") except (NameError, ValueError): print("\n********* Invalid input, goodbye! ************\n") import doctest doctest.testmod()
625
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : str, lowerCamelCase : int )-> None: lowerCamelCase__ : str =value lowerCamelCase__ : Node | None =None lowerCamelCase__ : Node | None =None class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : int, lowerCamelCase : Node )-> None: lowerCamelCase__ : Any =tree def snake_case ( self : str, lowerCamelCase : Node | None )-> int: if node is None: return 0 return node.value + ( self.depth_first_search(node.left ) + self.depth_first_search(node.right ) ) def __iter__( self : Dict )-> Iterator[int]: yield self.depth_first_search(self.tree ) if __name__ == "__main__": import doctest doctest.testmod()
625
1
"""simple docstring""" import importlib import torch import yaml from omegaconf import OmegaConf from taming.models.vqgan import VQModel def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : List[str]=False ): """simple docstring""" lowerCamelCase__ : Union[str, Any] =OmegaConf.load(__lowerCamelCase ) if display: print(yaml.dump(OmegaConf.to_container(__lowerCamelCase ) ) ) return config def snake_case__ ( __lowerCamelCase : List[str] , __lowerCamelCase : Tuple=None , __lowerCamelCase : List[str]=None ): """simple docstring""" if conf_path is None: lowerCamelCase__ : Dict ='''./model_checkpoints/vqgan_only.yaml''' lowerCamelCase__ : str =load_config(__lowerCamelCase , display=__lowerCamelCase ) lowerCamelCase__ : Optional[Any] =VQModel(**config.model.params ) if ckpt_path is None: lowerCamelCase__ : Optional[int] ='''./model_checkpoints/vqgan_only.pt''' lowerCamelCase__ : Any =torch.load(__lowerCamelCase , map_location=__lowerCamelCase ) if ".ckpt" in ckpt_path: lowerCamelCase__ : Tuple =sd['''state_dict'''] model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase ) model.to(__lowerCamelCase ) del sd return model def snake_case__ ( __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] ): """simple docstring""" lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int =model.encode(__lowerCamelCase ) print(f'''VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}''' ) lowerCamelCase__ : Optional[int] =model.decode(__lowerCamelCase ) return xrec def snake_case__ ( __lowerCamelCase : List[str] , __lowerCamelCase : str=False ): """simple docstring""" lowerCamelCase__ , lowerCamelCase__ : Any =string.rsplit('''.''' , 1 ) if reload: lowerCamelCase__ : int =importlib.import_module(__lowerCamelCase ) importlib.reload(__lowerCamelCase ) return getattr(importlib.import_module(__lowerCamelCase , package=__lowerCamelCase ) , cls ) def snake_case__ ( __lowerCamelCase : List[Any] ): """simple docstring""" if "target" not in config: raise 
KeyError('''Expected key `target` to instantiate.''' ) return get_obj_from_str(config['''target'''] )(**config.get('''params''' , {} ) ) def snake_case__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : List[str]=True , __lowerCamelCase : Dict=True ): """simple docstring""" lowerCamelCase__ : Optional[Any] =instantiate_from_config(__lowerCamelCase ) if sd is not None: model.load_state_dict(__lowerCamelCase ) if gpu: model.cuda() if eval_mode: model.eval() return {"model": model} def snake_case__ ( __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple ): """simple docstring""" # load the specified checkpoint if ckpt: lowerCamelCase__ : Optional[Any] =torch.load(__lowerCamelCase , map_location='''cpu''' ) lowerCamelCase__ : Optional[int] =pl_sd['''global_step'''] print(f'''loaded model from global step {global_step}.''' ) else: lowerCamelCase__ : int ={'''state_dict''': None} lowerCamelCase__ : List[Any] =None lowerCamelCase__ : Union[str, Any] =load_model_from_config(config.model , pl_sd['''state_dict'''] , gpu=__lowerCamelCase , eval_mode=__lowerCamelCase )['''model'''] return model, global_step
625
"""simple docstring""" import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel _lowercase : List[str] = logging.getLogger(__name__) def snake_case__ ( __lowerCamelCase : Any , __lowerCamelCase : str ): """simple docstring""" # save results if os.path.exists(__lowerCamelCase ): if os.path.exists(os.path.join(__lowerCamelCase , '''config.json''' ) ) and os.path.isfile( os.path.join(__lowerCamelCase , '''config.json''' ) ): os.remove(os.path.join(__lowerCamelCase , '''config.json''' ) ) if os.path.exists(os.path.join(__lowerCamelCase , '''pytorch_model.bin''' ) ) and os.path.isfile( os.path.join(__lowerCamelCase , '''pytorch_model.bin''' ) ): os.remove(os.path.join(__lowerCamelCase , '''pytorch_model.bin''' ) ) else: os.makedirs(__lowerCamelCase ) model.save_pretrained(__lowerCamelCase ) def snake_case__ ( __lowerCamelCase : Tuple , __lowerCamelCase : Dict=False ): """simple docstring""" lowerCamelCase__ : Union[str, Any] =2 if unlogit: lowerCamelCase__ : Any =torch.pow(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : List[str] =p * torch.log(__lowerCamelCase ) lowerCamelCase__ : Tuple =0 return -plogp.sum(dim=-1 ) def snake_case__ ( __lowerCamelCase : Any ): """simple docstring""" logger.info('''lv, h >\t''' + '''\t'''.join(f'''{x + 1}''' for x in range(len(__lowerCamelCase ) ) ) ) for row in range(len(__lowerCamelCase ) ): if tensor.dtype != torch.long: logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) ) else: logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:d}''' for x in tensor[row].cpu().data ) ) def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : 
List[str]=None , __lowerCamelCase : Tuple=False ): """simple docstring""" lowerCamelCase__ , lowerCamelCase__ : Tuple =model.config.num_hidden_layers, model.config.num_attention_heads lowerCamelCase__ : Optional[Any] =torch.zeros(__lowerCamelCase , __lowerCamelCase ).to(args.device ) lowerCamelCase__ : Optional[Any] =torch.zeros(__lowerCamelCase , __lowerCamelCase ).to(args.device ) if head_mask is None: lowerCamelCase__ : List[Any] =torch.ones(__lowerCamelCase , __lowerCamelCase ).to(args.device ) head_mask.requires_grad_(requires_grad=__lowerCamelCase ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: lowerCamelCase__ : Union[str, Any] =None lowerCamelCase__ : List[str] =0.0 lowerCamelCase__ : Union[str, Any] =0.0 for step, inputs in enumerate(tqdm(__lowerCamelCase , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ): lowerCamelCase__ : Any =tuple(t.to(args.device ) for t in inputs ) ((lowerCamelCase__) , ) : Any =inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) lowerCamelCase__ : Dict =model(__lowerCamelCase , labels=__lowerCamelCase , head_mask=__lowerCamelCase ) # (loss), lm_logits, presents, (all hidden_states), (attentions) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple =( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(__lowerCamelCase ): lowerCamelCase__ : Any =entropy(attn.detach() , __lowerCamelCase ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(__lowerCamelCase ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise 
importance normalization if not args.dont_normalize_importance_by_layer: lowerCamelCase__ : int =2 lowerCamelCase__ : List[str] =torch.pow(torch.pow(__lowerCamelCase , __lowerCamelCase ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-2_0 if not args.dont_normalize_global_importance: lowerCamelCase__ : int =(head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info('''Attention entropies''' ) print_ad_tensor(__lowerCamelCase ) if compute_importance: logger.info('''Head importance scores''' ) print_ad_tensor(__lowerCamelCase ) logger.info('''Head ranked by importance scores''' ) lowerCamelCase__ : Optional[int] =torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) lowerCamelCase__ : Dict =torch.arange( head_importance.numel() , device=args.device ) lowerCamelCase__ : Any =head_ranks.view_as(__lowerCamelCase ) print_ad_tensor(__lowerCamelCase ) return attn_entropy, head_importance, total_loss def snake_case__ ( __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : int ): """simple docstring""" lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict =compute_heads_importance(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase ) lowerCamelCase__ : int =1 / loss # instead of downsteam score use the LM loss logger.info('''Pruning: original score: %f, threshold: %f''' , __lowerCamelCase , original_score * args.masking_threshold ) lowerCamelCase__ : Dict =torch.ones_like(__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] =max(1 , int(new_head_mask.numel() * args.masking_amount ) ) lowerCamelCase__ : List[Any] =original_score while current_score >= original_score * args.masking_threshold: lowerCamelCase__ : List[Any] =new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads lowerCamelCase__ : int 
=float('''Inf''' ) lowerCamelCase__ : Union[str, Any] =head_importance.view(-1 ).sort()[1] if len(__lowerCamelCase ) <= num_to_mask: print('''BREAK BY num_to_mask''' ) break # mask heads lowerCamelCase__ : List[str] =current_heads_to_mask[:num_to_mask] logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) ) lowerCamelCase__ : Optional[int] =new_head_mask.view(-1 ) lowerCamelCase__ : Optional[Any] =0.0 lowerCamelCase__ : Dict =new_head_mask.view_as(__lowerCamelCase ) lowerCamelCase__ : Tuple =new_head_mask.clone().detach() print_ad_tensor(__lowerCamelCase ) # Compute metric and head importance again lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =compute_heads_importance( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase , head_mask=__lowerCamelCase ) lowerCamelCase__ : Any =1 / loss logger.info( '''Masking: current score: %f, remaining heads %d (%.1f percents)''' , __lowerCamelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , ) logger.info('''Final head mask''' ) print_ad_tensor(__lowerCamelCase ) np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() ) return head_mask def snake_case__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[Any] ): """simple docstring""" lowerCamelCase__ : str =datetime.now() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] =compute_heads_importance( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase , compute_importance=__lowerCamelCase , head_mask=__lowerCamelCase ) lowerCamelCase__ : Tuple =1 / loss lowerCamelCase__ : Optional[Any] =datetime.now() - before_time lowerCamelCase__ : int =sum(p.numel() for p in model.parameters() ) lowerCamelCase__ : Any ={ layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in 
range(len(__lowerCamelCase ) ) } for k, v in heads_to_prune.items(): if isinstance(__lowerCamelCase , __lowerCamelCase ): lowerCamelCase__ : Optional[int] =[ v, ] assert sum(len(__lowerCamelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(__lowerCamelCase ) lowerCamelCase__ : List[str] =sum(p.numel() for p in model.parameters() ) lowerCamelCase__ : Any =datetime.now() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict =compute_heads_importance( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase , compute_importance=__lowerCamelCase , head_mask=__lowerCamelCase , actually_pruned=__lowerCamelCase , ) lowerCamelCase__ : str =1 / loss lowerCamelCase__ : Union[str, Any] =datetime.now() - before_time logger.info( '''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , __lowerCamelCase , __lowerCamelCase , pruned_num_params / original_num_params * 100 , ) logger.info('''Pruning: score with masking: %f score with pruning: %f''' , __lowerCamelCase , __lowerCamelCase ) logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 100 ) save_model(__lowerCamelCase , args.output_dir ) def snake_case__ ( ): """simple docstring""" lowerCamelCase__ : Optional[int] =argparse.ArgumentParser() # Required parameters parser.add_argument( '''--data_dir''' , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help='''The input data dir. 
Should contain the .tsv files (or other data files) for the task.''' , ) parser.add_argument( '''--model_name_or_path''' , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--output_dir''' , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help='''The output directory where the model predictions and checkpoints will be written.''' , ) # Other parameters parser.add_argument( '''--config_name''' , default='''''' , type=__lowerCamelCase , help='''Pretrained config name or path if not the same as model_name_or_path''' , ) parser.add_argument( '''--tokenizer_name''' , default='''''' , type=__lowerCamelCase , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , ) parser.add_argument( '''--cache_dir''' , default=__lowerCamelCase , type=__lowerCamelCase , help='''Where do you want to store the pre-trained models downloaded from s3''' , ) parser.add_argument( '''--data_subset''' , type=__lowerCamelCase , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' ) parser.add_argument( '''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' ) parser.add_argument( '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' ) parser.add_argument( '''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' ) parser.add_argument( '''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , ) parser.add_argument( '''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' ) parser.add_argument( '''--masking_threshold''' , default=0.9 , 
type=__lowerCamelCase , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , ) parser.add_argument( '''--masking_amount''' , default=0.1 , type=__lowerCamelCase , help='''Amount to heads to masking at each masking step.''' ) parser.add_argument('''--metric_name''' , default='''acc''' , type=__lowerCamelCase , help='''Metric to use for head masking.''' ) parser.add_argument( '''--max_seq_length''' , default=128 , type=__lowerCamelCase , help=( '''The maximum total input sequence length after WordPiece tokenization. \n''' '''Sequences longer than this will be truncated, sequences shorter padded.''' ) , ) parser.add_argument('''--batch_size''' , default=1 , type=__lowerCamelCase , help='''Batch size.''' ) parser.add_argument('''--seed''' , type=__lowerCamelCase , default=42 ) parser.add_argument('''--local_rank''' , type=__lowerCamelCase , default=-1 , help='''local_rank for distributed training on gpus''' ) parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' ) parser.add_argument('''--server_ip''' , type=__lowerCamelCase , default='''''' , help='''Can be used for distant debugging.''' ) parser.add_argument('''--server_port''' , type=__lowerCamelCase , default='''''' , help='''Can be used for distant debugging.''' ) lowerCamelCase__ : List[Any] =parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('''Waiting for debugger attach''' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowerCamelCase ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: lowerCamelCase__ : Dict =torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' ) lowerCamelCase__ : Dict =0 if args.no_cuda else 
torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) lowerCamelCase__ : str =torch.device('''cuda''' , args.local_rank ) lowerCamelCase__ : Any =1 torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) lowerCamelCase__ : Union[str, Any] =GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: lowerCamelCase__ : List[Any] =nn.parallel.DistributedDataParallel( __lowerCamelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__lowerCamelCase ) elif args.n_gpu > 1: lowerCamelCase__ : int =nn.DataParallel(__lowerCamelCase ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=__lowerCamelCase ) torch.save(__lowerCamelCase , os.path.join(args.output_dir , '''run_args.bin''' ) ) logger.info('''Training/evaluation parameters %s''' , __lowerCamelCase ) # Prepare dataset lowerCamelCase__ : Union[str, Any] =np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) lowerCamelCase__ : Any =(torch.from_numpy(__lowerCamelCase ),) lowerCamelCase__ : List[Any] =TensorDataset(*__lowerCamelCase ) lowerCamelCase__ : List[str] =RandomSampler(__lowerCamelCase ) lowerCamelCase__ : Dict =DataLoader(__lowerCamelCase , sampler=__lowerCamelCase , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: lowerCamelCase__ : 
Optional[int] =mask_heads(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) prune_heads(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if __name__ == "__main__": main()
625
1
"""simple docstring""" import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version _lowercase : List[str] = version.parse(importlib_metadata.version("nltk")) if NLTK_VERSION >= version.Version("3.6.4"): from nltk import word_tokenize _lowercase : str = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n" _lowercase : int = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. 
This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n" _lowercase : int = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __SCREAMING_SNAKE_CASE ( datasets.Metric ): '''simple docstring''' def snake_case ( self : Dict )-> List[str]: return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { '''predictions''': datasets.Value('''string''', id='''sequence''' ), '''references''': datasets.Value('''string''', id='''sequence''' ), } ), codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''], reference_urls=[ '''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''', '''https://en.wikipedia.org/wiki/METEOR''', ], ) def snake_case ( self : str, lowerCamelCase : Dict )-> Optional[int]: 
import nltk nltk.download('''wordnet''' ) if NLTK_VERSION >= version.Version('''3.6.5''' ): nltk.download('''punkt''' ) if NLTK_VERSION >= version.Version('''3.6.6''' ): nltk.download('''omw-1.4''' ) def snake_case ( self : int, lowerCamelCase : Optional[int], lowerCamelCase : Optional[int], lowerCamelCase : List[str]=0.9, lowerCamelCase : str=3, lowerCamelCase : Union[str, Any]=0.5 )-> int: if NLTK_VERSION >= version.Version('''3.6.5''' ): lowerCamelCase__ : Tuple =[ meteor_score.single_meteor_score( word_tokenize(lowerCamelCase ), word_tokenize(lowerCamelCase ), alpha=lowerCamelCase, beta=lowerCamelCase, gamma=lowerCamelCase ) for ref, pred in zip(lowerCamelCase, lowerCamelCase ) ] else: lowerCamelCase__ : List[Any] =[ meteor_score.single_meteor_score(lowerCamelCase, lowerCamelCase, alpha=lowerCamelCase, beta=lowerCamelCase, gamma=lowerCamelCase ) for ref, pred in zip(lowerCamelCase, lowerCamelCase ) ] return {"meteor": np.mean(lowerCamelCase )}
625
"""simple docstring""" import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : Tuple ): """simple docstring""" lowerCamelCase__ : Union[str, Any] =AutoConfig.from_pretrained(__lowerCamelCase ) lowerCamelCase__ : Any =FlaxAutoModelForSeqaSeqLM.from_config(config=__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] =checkpoints.load_tax_checkpoint(__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] ='''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp'''] if config.model_type == "t5": lowerCamelCase__ : List[str] ='''SelfAttention''' if config.model_type == "longt5" and config.encoder_attention_type == "local": lowerCamelCase__ : List[Any] ='''LocalSelfAttention''' elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowerCamelCase__ : Optional[Any] ='''TransientGlobalSelfAttention''' else: raise ValueError( '''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`''' ''' attribute with a value from [\'local\', \'transient-global].''' ) # Encoder for layer_index in range(config.num_layers ): lowerCamelCase__ : List[Any] =f'''layers_{str(__lowerCamelCase )}''' # Self-Attention lowerCamelCase__ : List[str] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel'''] lowerCamelCase__ : Optional[int] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel'''] lowerCamelCase__ : List[str] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel'''] lowerCamelCase__ : List[Any] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel'''] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowerCamelCase__ : str 
=tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale'''] # Layer Normalization lowerCamelCase__ : List[Any] =tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale'''] if split_mlp_wi: lowerCamelCase__ : Optional[Any] =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel'''] lowerCamelCase__ : Dict =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel'''] else: lowerCamelCase__ : List[str] =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel'''] lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel'''] # Layer Normalization lowerCamelCase__ : Tuple =tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale'''] # Assigning lowerCamelCase__ : str =flax_model.params['''encoder''']['''block'''][str(__lowerCamelCase )]['''layer'''] lowerCamelCase__ : int =tax_attention_key lowerCamelCase__ : Optional[int] =tax_attention_out lowerCamelCase__ : List[Any] =tax_attention_query lowerCamelCase__ : Optional[Any] =tax_attention_value lowerCamelCase__ : List[str] =tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowerCamelCase__ : Optional[int] =tax_global_layer_norm if split_mlp_wi: lowerCamelCase__ : Optional[int] =tax_mlp_wi_a lowerCamelCase__ : Optional[int] =tax_mlp_wi_a else: lowerCamelCase__ : Union[str, Any] =tax_mlp_wi lowerCamelCase__ : str =tax_mlp_wo lowerCamelCase__ : Optional[Any] =tax_mlp_layer_norm lowerCamelCase__ : Optional[int] =flax_model_encoder_layer_block # Only for layer 0: lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T lowerCamelCase__ : str =tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if 
config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowerCamelCase__ : Optional[int] =tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T lowerCamelCase__ : Optional[int] =tax_encoder_global_rel_embedding # Assigning lowerCamelCase__ : int =tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale'''] lowerCamelCase__ : List[Any] =tax_encoder_norm # Decoder for layer_index in range(config.num_layers ): lowerCamelCase__ : Dict =f'''layers_{str(__lowerCamelCase )}''' # Self-Attention lowerCamelCase__ : Dict =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel'''] lowerCamelCase__ : List[Any] =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel'''] lowerCamelCase__ : Optional[int] =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel'''] lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel'''] # Layer Normalization lowerCamelCase__ : int =tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][ '''scale''' ] # Encoder-Decoder-Attention lowerCamelCase__ : int =tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention'''] lowerCamelCase__ : List[Any] =tax_enc_dec_attention_module['''key''']['''kernel'''] lowerCamelCase__ : Any =tax_enc_dec_attention_module['''out''']['''kernel'''] lowerCamelCase__ : Dict =tax_enc_dec_attention_module['''query''']['''kernel'''] lowerCamelCase__ : List[str] =tax_enc_dec_attention_module['''value''']['''kernel'''] # Layer Normalization lowerCamelCase__ : Dict =tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale'''] # MLP if split_mlp_wi: lowerCamelCase__ : str =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel'''] 
lowerCamelCase__ : Any =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel'''] else: lowerCamelCase__ : List[Any] =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel'''] lowerCamelCase__ : Optional[Any] =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel'''] # Layer Normalization lowerCamelCase__ : str =tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale'''] # Assigning lowerCamelCase__ : str =flax_model.params['''decoder''']['''block'''][str(__lowerCamelCase )]['''layer'''] lowerCamelCase__ : Union[str, Any] =tax_attention_key lowerCamelCase__ : str =tax_attention_out lowerCamelCase__ : Optional[int] =tax_attention_query lowerCamelCase__ : Dict =tax_attention_value lowerCamelCase__ : List[str] =tax_pre_attention_layer_norm lowerCamelCase__ : List[Any] =tax_enc_dec_attention_key lowerCamelCase__ : Any =tax_enc_dec_attention_out lowerCamelCase__ : Any =tax_enc_dec_attention_query lowerCamelCase__ : Optional[int] =tax_enc_dec_attention_value lowerCamelCase__ : Dict =tax_cross_layer_norm if split_mlp_wi: lowerCamelCase__ : Tuple =tax_mlp_wi_a lowerCamelCase__ : int =tax_mlp_wi_a else: lowerCamelCase__ : List[Any] =tax_mlp_wi lowerCamelCase__ : Dict =tax_mlp_wo lowerCamelCase__ : Tuple =txa_mlp_layer_norm lowerCamelCase__ : Optional[Any] =flax_model_decoder_layer_block # Decoder Normalization lowerCamelCase__ : Dict =tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale'''] lowerCamelCase__ : int =txa_decoder_norm # Only for layer 0: lowerCamelCase__ : Tuple =tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T lowerCamelCase__ : Tuple =tax_decoder_rel_embedding # Token Embeddings lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''token_embedder''']['''embedding'''] lowerCamelCase__ : Dict =txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in 
tax_model["target"]["decoder"]: lowerCamelCase__ : int =tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel'''] flax_model.save_pretrained(__lowerCamelCase ) print('''T5X Model was sucessfully converted!''' ) if __name__ == "__main__": _lowercase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint." ) parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.") parser.add_argument( "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model." ) _lowercase : List[Any] = parser.parse_args() convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
625
1
"""simple docstring""" import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def snake_case__ ( __lowerCamelCase : List[Any] ): """simple docstring""" lowerCamelCase__ : Any ={} lowerCamelCase__ : Tuple =tokenizer(example['''content'''] , truncation=__lowerCamelCase )['''input_ids'''] lowerCamelCase__ : Dict =len(example['''content'''] ) / len(output['''input_ids'''] ) return output _lowercase : Dict = HfArgumentParser(PretokenizationArguments) _lowercase : List[Any] = parser.parse_args() if args.num_workers is None: _lowercase : Tuple = multiprocessing.cpu_count() _lowercase : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_dir) _lowercase : List[Any] = time.time() _lowercase : Optional[Any] = load_dataset(args.dataset_name, split="train") print(f'Dataset loaded in {time.time()-t_start:.2f}s') _lowercase : Tuple = time.time() _lowercase : Tuple = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ "repo_name", "path", "copies", "size", "content", "license", "hash", "line_mean", "line_max", "alpha_frac", "autogenerated", ], ) print(f'Dataset tokenized in {time.time()-t_start:.2f}s') _lowercase : List[str] = time.time() ds.push_to_hub(args.tokenized_data_repo) print(f'Data pushed to the hub in {time.time()-t_start:.2f}s')
625
"""simple docstring""" import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Optional[Any], lowerCamelCase : Tuple, lowerCamelCase : List[str]=13, lowerCamelCase : List[Any]=32, lowerCamelCase : Dict=3, lowerCamelCase : int=4, lowerCamelCase : str=[10, 20, 30, 40], lowerCamelCase : Any=[2, 2, 3, 2], lowerCamelCase : int=True, lowerCamelCase : int=True, lowerCamelCase : str=37, lowerCamelCase : Optional[int]="gelu", lowerCamelCase : Optional[int]=10, lowerCamelCase : Any=0.02, lowerCamelCase : Union[str, Any]=["stage2", "stage3", "stage4"], lowerCamelCase : Optional[int]=3, lowerCamelCase : Tuple=None, )-> List[str]: lowerCamelCase__ : List[str] =parent lowerCamelCase__ : Tuple =batch_size lowerCamelCase__ : str =image_size lowerCamelCase__ : Any =num_channels lowerCamelCase__ : Tuple =num_stages lowerCamelCase__ : List[str] =hidden_sizes lowerCamelCase__ : Any =depths lowerCamelCase__ : Union[str, Any] =is_training lowerCamelCase__ : Tuple =use_labels lowerCamelCase__ : int =intermediate_size lowerCamelCase__ : Optional[int] =hidden_act lowerCamelCase__ : Dict =type_sequence_label_size lowerCamelCase__ : Tuple =initializer_range lowerCamelCase__ : Any 
=out_features lowerCamelCase__ : Tuple =num_labels lowerCamelCase__ : Optional[int] =scope lowerCamelCase__ : Optional[int] =num_stages def snake_case ( self : str )-> Optional[int]: lowerCamelCase__ : str =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ : Tuple =None if self.use_labels: lowerCamelCase__ : Union[str, Any] =ids_tensor([self.batch_size], self.type_sequence_label_size ) lowerCamelCase__ : int =self.get_config() return config, pixel_values, labels def snake_case ( self : Union[str, Any] )-> Any: return ConvNextConfig( num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, ) def snake_case ( self : Union[str, Any] )-> Any: return UperNetConfig( backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=lowerCamelCase, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=lowerCamelCase, loss_ignore_index=255, num_labels=self.num_labels, ) def snake_case ( self : int, lowerCamelCase : str, lowerCamelCase : List[str], lowerCamelCase : List[Any] )-> Tuple: lowerCamelCase__ : List[str] =UperNetForSemanticSegmentation(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() lowerCamelCase__ : int =model(lowerCamelCase ) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def snake_case ( self : Any )-> Tuple: lowerCamelCase__ : Dict =self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) : Any =config_and_inputs lowerCamelCase__ : Optional[int] ={'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ 
, unittest.TestCase ): '''simple docstring''' _a = (UperNetForSemanticSegmentation,) if is_torch_available() else () _a = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {} _a = False _a = False _a = False _a = False _a = False _a = False def snake_case ( self : Optional[int] )-> Optional[int]: lowerCamelCase__ : Optional[Any] =UperNetModelTester(self ) lowerCamelCase__ : Union[str, Any] =ConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase, hidden_size=37 ) def snake_case ( self : Optional[int] )-> Optional[int]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def snake_case ( self : List[str] )-> Dict: return def snake_case ( self : Optional[int] )-> List[str]: lowerCamelCase__ , lowerCamelCase__ : str =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Union[str, Any] =model_class(lowerCamelCase ) lowerCamelCase__ : Tuple =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ : Tuple =[*signature.parameters.keys()] lowerCamelCase__ : List[Any] =['''pixel_values'''] self.assertListEqual(arg_names[:1], lowerCamelCase ) def snake_case ( self : Any )-> Union[str, Any]: lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase ) @unittest.skip(reason='''UperNet does not use inputs_embeds''' ) def snake_case ( self : Optional[Any] )-> List[Any]: pass @unittest.skip(reason='''UperNet does not support input and output 
embeddings''' ) def snake_case ( self : Any )-> List[str]: pass @unittest.skip(reason='''UperNet does not have a base model''' ) def snake_case ( self : int )-> Any: pass @unittest.skip(reason='''UperNet does not have a base model''' ) def snake_case ( self : Dict )-> str: pass @require_torch_multi_gpu @unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def snake_case ( self : List[Any] )-> List[str]: pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def snake_case ( self : Tuple )-> str: pass def snake_case ( self : Optional[int] )-> List[str]: def check_hidden_states_output(lowerCamelCase : Dict, lowerCamelCase : int, lowerCamelCase : List[str] ): lowerCamelCase__ : Union[str, Any] =model_class(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() with torch.no_grad(): lowerCamelCase__ : Optional[Any] =model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) ) lowerCamelCase__ : Optional[Any] =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCamelCase__ : List[str] =self.model_tester.num_stages self.assertEqual(len(lowerCamelCase ), expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], ) lowerCamelCase__ , lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Optional[int] =True check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase__ : Optional[Any] =True check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase ) def snake_case ( self : Any )-> List[Any]: lowerCamelCase__ , 
lowerCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ : str =_config_zero_init(lowerCamelCase ) lowerCamelCase__ : Union[str, Any] =_config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: lowerCamelCase__ : Optional[int] =model_class(config=lowerCamelCase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', ) @unittest.skip(reason='''UperNet does not have tied weights''' ) def snake_case ( self : Any )-> str: pass @slow def snake_case ( self : int )-> Union[str, Any]: for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : str =UperNetForSemanticSegmentation.from_pretrained(lowerCamelCase ) self.assertIsNotNone(lowerCamelCase ) def snake_case__ ( ): """simple docstring""" lowerCamelCase__ : Optional[int] =hf_hub_download( repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''' ) lowerCamelCase__ : List[str] =Image.open(__lowerCamelCase ).convert('''RGB''' ) return image @require_torch @require_vision @slow class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def snake_case ( self : str )-> Union[str, Any]: lowerCamelCase__ : List[Any] =AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' ) lowerCamelCase__ : List[Any] =UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(lowerCamelCase ) lowerCamelCase__ : List[Any] =prepare_img() lowerCamelCase__ : List[Any] =processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase ) with torch.no_grad(): lowerCamelCase__ : List[Any] =model(**lowerCamelCase ) lowerCamelCase__ : Optional[int] =torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape, 
lowerCamelCase ) lowerCamelCase__ : Dict =torch.tensor( [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], lowerCamelCase, atol=1E-4 ) ) def snake_case ( self : Optional[int] )-> Optional[Any]: lowerCamelCase__ : str =AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' ) lowerCamelCase__ : Tuple =UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(lowerCamelCase ) lowerCamelCase__ : Dict =prepare_img() lowerCamelCase__ : Any =processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase ) with torch.no_grad(): lowerCamelCase__ : Any =model(**lowerCamelCase ) lowerCamelCase__ : Dict =torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape, lowerCamelCase ) lowerCamelCase__ : List[str] =torch.tensor( [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], lowerCamelCase, atol=1E-4 ) )
625
1
"""simple docstring""" from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class __SCREAMING_SNAKE_CASE : '''simple docstring''' _a = BlenderbotSmallConfig _a = {} _a = 'gelu' def __init__( self : Union[str, Any], lowerCamelCase : List[str], lowerCamelCase : Dict=13, lowerCamelCase : Optional[Any]=7, lowerCamelCase : Optional[int]=True, lowerCamelCase : int=False, lowerCamelCase : Union[str, Any]=99, lowerCamelCase : str=32, lowerCamelCase : List[Any]=2, lowerCamelCase : Optional[int]=4, lowerCamelCase : Union[str, Any]=37, lowerCamelCase : str=0.1, lowerCamelCase : Optional[int]=0.1, lowerCamelCase : Optional[Any]=20, lowerCamelCase : int=2, lowerCamelCase : Any=1, lowerCamelCase : Optional[Any]=0, )-> List[str]: lowerCamelCase__ : Any =parent lowerCamelCase__ : Dict =batch_size lowerCamelCase__ : Optional[int] =seq_length lowerCamelCase__ : Tuple =is_training lowerCamelCase__ : Dict =use_labels lowerCamelCase__ : List[Any] =vocab_size lowerCamelCase__ : str =hidden_size lowerCamelCase__ : str =num_hidden_layers lowerCamelCase__ : Union[str, Any] =num_attention_heads lowerCamelCase__ : Any =intermediate_size lowerCamelCase__ : Dict =hidden_dropout_prob lowerCamelCase__ : List[Any] =attention_probs_dropout_prob lowerCamelCase__ : str =max_position_embeddings lowerCamelCase__ : Optional[int] =eos_token_id lowerCamelCase__ : str =pad_token_id lowerCamelCase__ : Union[str, Any] =bos_token_id def snake_case ( self 
: Any )-> Any: lowerCamelCase__ : Any =ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size ) lowerCamelCase__ : Tuple =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 ) lowerCamelCase__ : Any =tf.concat([input_ids, eos_tensor], axis=1 ) lowerCamelCase__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) lowerCamelCase__ : int =self.config_cls( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, ) lowerCamelCase__ : Optional[int] =prepare_blenderbot_small_inputs_dict(lowerCamelCase, lowerCamelCase, lowerCamelCase ) return config, inputs_dict def snake_case ( self : Any, lowerCamelCase : str, lowerCamelCase : Any )-> Optional[Any]: lowerCamelCase__ : Union[str, Any] =TFBlenderbotSmallModel(config=lowerCamelCase ).get_decoder() lowerCamelCase__ : List[Any] =inputs_dict['''input_ids'''] lowerCamelCase__ : Optional[int] =input_ids[:1, :] lowerCamelCase__ : str =inputs_dict['''attention_mask'''][:1, :] lowerCamelCase__ : Union[str, Any] =inputs_dict['''head_mask'''] lowerCamelCase__ : Optional[Any] =1 # first forward pass lowerCamelCase__ : Dict =model(lowerCamelCase, attention_mask=lowerCamelCase, head_mask=lowerCamelCase, use_cache=lowerCamelCase ) lowerCamelCase__ , lowerCamelCase__ : List[str] =outputs.to_tuple() # create hypothetical next token and extent to next_input_ids lowerCamelCase__ : Union[str, Any] =ids_tensor((self.batch_size, 3), config.vocab_size ) 
lowerCamelCase__ : Tuple =tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta ) # append to next input_ids and lowerCamelCase__ : List[str] =tf.concat([input_ids, next_tokens], axis=-1 ) lowerCamelCase__ : str =tf.concat([attention_mask, next_attn_mask], axis=-1 ) lowerCamelCase__ : Optional[int] =model(lowerCamelCase, attention_mask=lowerCamelCase )[0] lowerCamelCase__ : Union[str, Any] =model(lowerCamelCase, attention_mask=lowerCamelCase, past_key_values=lowerCamelCase )[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] ) # select random slice lowerCamelCase__ : Tuple =int(ids_tensor((1,), output_from_past.shape[-1] ) ) lowerCamelCase__ : int =output_from_no_past[:, -3:, random_slice_idx] lowerCamelCase__ : List[str] =output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowerCamelCase, lowerCamelCase, rtol=1E-3 ) def snake_case__ ( __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Tuple=None , __lowerCamelCase : Dict=None , __lowerCamelCase : Optional[int]=None , ): """simple docstring""" if attention_mask is None: lowerCamelCase__ : List[str] =tf.cast(tf.math.not_equal(__lowerCamelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: lowerCamelCase__ : str =tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: lowerCamelCase__ : int =tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowerCamelCase__ : int =tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: lowerCamelCase__ : List[str] =tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, 
"decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' _a = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) _a = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () _a = ( { 'conversational': TFBlenderbotSmallForConditionalGeneration, 'feature-extraction': TFBlenderbotSmallModel, 'summarization': TFBlenderbotSmallForConditionalGeneration, 'text2text-generation': TFBlenderbotSmallForConditionalGeneration, 'translation': TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) _a = True _a = False _a = False def snake_case ( self : Any )-> str: lowerCamelCase__ : Tuple =TFBlenderbotSmallModelTester(self ) lowerCamelCase__ : Union[str, Any] =ConfigTester(self, config_class=lowerCamelCase ) def snake_case ( self : Any )-> Optional[int]: self.config_tester.run_common_tests() def snake_case ( self : int )-> str: lowerCamelCase__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowerCamelCase ) @require_tokenizers @require_tf class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' _a = [ 'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like ' ' i\'m going to throw up.\nand why is that?' 
] _a = 'facebook/blenderbot_small-90M' @cached_property def snake_case ( self : Any )-> List[Any]: # use "old" tokenizer here because of bug when downloading new tokenizer return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) @cached_property def snake_case ( self : int )-> List[Any]: lowerCamelCase__ : str =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def snake_case ( self : Tuple )-> int: lowerCamelCase__ : Dict =self.tokenizer(self.src_text, return_tensors='''tf''' ) lowerCamelCase__ : Any =self.model.generate( model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=lowerCamelCase, ) lowerCamelCase__ : Any =self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=lowerCamelCase )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
625
"""simple docstring""" from ..utils import DummyObject, requires_backends class __SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase_ ): '''simple docstring''' _a = ['onnx'] def __init__( self : List[str], *lowerCamelCase : Union[str, Any], **lowerCamelCase : str )-> Optional[int]: requires_backends(self, ['''onnx'''] ) @classmethod def snake_case ( cls : List[str], *lowerCamelCase : Any, **lowerCamelCase : Union[str, Any] )-> Optional[int]: requires_backends(cls, ['''onnx'''] ) @classmethod def snake_case ( cls : Union[str, Any], *lowerCamelCase : Tuple, **lowerCamelCase : Tuple )-> Optional[int]: requires_backends(cls, ['''onnx'''] )
625
1
"""simple docstring""" import os import tempfile import unittest import uuid from pathlib import Path from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available if is_torch_available(): import torch if is_soundfile_availble(): import soundfile as sf if is_vision_available(): from PIL import Image def snake_case__ ( __lowerCamelCase : Tuple="" ): """simple docstring""" lowerCamelCase__ : Optional[int] =tempfile.mkdtemp() return os.path.join(__lowerCamelCase , str(uuid.uuida() ) + suffix ) @require_soundfile @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def snake_case ( self : int )-> Tuple: lowerCamelCase__ : Optional[int] =torch.rand(12, dtype=torch.floataa ) - 0.5 lowerCamelCase__ : str =AgentAudio(lowerCamelCase ) lowerCamelCase__ : int =str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(lowerCamelCase, agent_type.to_raw(), atol=1E-4 ) ) del agent_type # Ensure the path remains even after the object deletion self.assertTrue(os.path.exists(lowerCamelCase ) ) # Ensure that the file contains the same value as the original tensor lowerCamelCase__ , lowerCamelCase__ : Dict =sf.read(lowerCamelCase ) self.assertTrue(torch.allclose(lowerCamelCase, torch.tensor(lowerCamelCase ), atol=1E-4 ) ) def snake_case ( self : Any )-> List[Any]: lowerCamelCase__ : int =torch.rand(12, dtype=torch.floataa ) - 0.5 lowerCamelCase__ : List[Any] =get_new_path(suffix='''.wav''' ) sf.write(lowerCamelCase, lowerCamelCase, 1_6000 ) lowerCamelCase__ : Tuple =AgentAudio(lowerCamelCase ) self.assertTrue(torch.allclose(lowerCamelCase, agent_type.to_raw(), atol=1E-4 ) ) self.assertEqual(agent_type.to_string(), lowerCamelCase ) @require_vision @require_torch class 
__SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def snake_case ( self : Any )-> List[Any]: lowerCamelCase__ : Tuple =torch.randint(0, 256, (64, 64, 3) ) lowerCamelCase__ : Optional[int] =AgentImage(lowerCamelCase ) lowerCamelCase__ : List[str] =str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(lowerCamelCase, agent_type._tensor, atol=1E-4 ) ) self.assertIsInstance(agent_type.to_raw(), Image.Image ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(lowerCamelCase ) ) def snake_case ( self : List[Any] )-> Tuple: lowerCamelCase__ : Optional[Any] =Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' lowerCamelCase__ : Tuple =Image.open(lowerCamelCase ) lowerCamelCase__ : List[Any] =AgentImage(lowerCamelCase ) self.assertTrue(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(lowerCamelCase ) ) def snake_case ( self : List[str] )-> Dict: lowerCamelCase__ : Optional[int] =Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' lowerCamelCase__ : int =Image.open(lowerCamelCase ) lowerCamelCase__ : List[Any] =AgentImage(lowerCamelCase ) self.assertFalse(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(lowerCamelCase ) ) class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def snake_case ( self : str )-> Optional[Any]: lowerCamelCase__ : Union[str, Any] ='''Hey!''' lowerCamelCase__ : Union[str, Any] =AgentText(lowerCamelCase ) self.assertEqual(lowerCamelCase, agent_type.to_string() ) self.assertEqual(lowerCamelCase, agent_type.to_raw() ) self.assertEqual(lowerCamelCase, 
lowerCamelCase )
625
"""simple docstring""" import colorsys from PIL import Image # type: ignore def snake_case__ ( __lowerCamelCase : float , __lowerCamelCase : float , __lowerCamelCase : int ): """simple docstring""" lowerCamelCase__ : Optional[Any] =x lowerCamelCase__ : Any =y for step in range(__lowerCamelCase ): # noqa: B007 lowerCamelCase__ : List[Any] =a * a - b * b + x lowerCamelCase__ : Optional[int] =2 * a * b + y lowerCamelCase__ : Union[str, Any] =a_new # divergence happens for all complex number with an absolute value # greater than 4 if a * a + b * b > 4: break return step / (max_step - 1) def snake_case__ ( __lowerCamelCase : float ): """simple docstring""" if distance == 1: return (0, 0, 0) else: return (255, 255, 255) def snake_case__ ( __lowerCamelCase : float ): """simple docstring""" if distance == 1: return (0, 0, 0) else: return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(__lowerCamelCase , 1 , 1 ) ) def snake_case__ ( __lowerCamelCase : int = 800 , __lowerCamelCase : int = 600 , __lowerCamelCase : float = -0.6 , __lowerCamelCase : float = 0 , __lowerCamelCase : float = 3.2 , __lowerCamelCase : int = 50 , __lowerCamelCase : bool = True , ): """simple docstring""" lowerCamelCase__ : Optional[Any] =Image.new('''RGB''' , (image_width, image_height) ) lowerCamelCase__ : Optional[int] =img.load() # loop through the image-coordinates for image_x in range(__lowerCamelCase ): for image_y in range(__lowerCamelCase ): # determine the figure-coordinates based on the image-coordinates lowerCamelCase__ : Optional[Any] =figure_width / image_width * image_height lowerCamelCase__ : Dict =figure_center_x + (image_x / image_width - 0.5) * figure_width lowerCamelCase__ : Optional[int] =figure_center_y + (image_y / image_height - 0.5) * figure_height lowerCamelCase__ : Any =get_distance(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # color the corresponding pixel based on the selected coloring-function if use_distance_color_coding: lowerCamelCase__ : int 
=get_color_coded_rgb(__lowerCamelCase ) else: lowerCamelCase__ : Optional[int] =get_black_and_white_rgb(__lowerCamelCase ) return img if __name__ == "__main__": import doctest doctest.testmod() # colored version, full figure _lowercase : Optional[Any] = get_image() # uncomment for colored version, different section, zoomed in # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4, # figure_width = 0.8) # uncomment for black and white version, full figure # img = get_image(use_distance_color_coding = False) # uncomment to save the image # img.save("mandelbrot.png") img.show()
625
1
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : str, lowerCamelCase : int )-> None: lowerCamelCase__ : str =value lowerCamelCase__ : Node | None =None lowerCamelCase__ : Node | None =None class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : int, lowerCamelCase : Node )-> None: lowerCamelCase__ : Any =tree def snake_case ( self : str, lowerCamelCase : Node | None )-> int: if node is None: return 0 return node.value + ( self.depth_first_search(node.left ) + self.depth_first_search(node.right ) ) def __iter__( self : Dict )-> Iterator[int]: yield self.depth_first_search(self.tree ) if __name__ == "__main__": import doctest doctest.testmod()
625
"""simple docstring""" import argparse import json import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( VideoMAEConfig, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEImageProcessor, ) def snake_case__ ( __lowerCamelCase : Optional[Any] ): """simple docstring""" lowerCamelCase__ : str =VideoMAEConfig() set_architecture_configs(__lowerCamelCase , __lowerCamelCase ) if "finetuned" not in model_name: lowerCamelCase__ : int =False if "finetuned" in model_name: lowerCamelCase__ : str ='''huggingface/label-files''' if "kinetics" in model_name: lowerCamelCase__ : List[Any] =400 lowerCamelCase__ : Optional[int] ='''kinetics400-id2label.json''' elif "ssv2" in model_name: lowerCamelCase__ : Tuple =174 lowerCamelCase__ : Optional[Any] ='''something-something-v2-id2label.json''' else: raise ValueError('''Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.''' ) lowerCamelCase__ : Optional[int] =json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) ) lowerCamelCase__ : List[Any] ={int(__lowerCamelCase ): v for k, v in idalabel.items()} lowerCamelCase__ : Dict =idalabel lowerCamelCase__ : Any ={v: k for k, v in idalabel.items()} return config def snake_case__ ( __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any] ): """simple docstring""" if "small" in model_name: lowerCamelCase__ : Optional[Any] =384 lowerCamelCase__ : List[Any] =1536 lowerCamelCase__ : int =12 lowerCamelCase__ : Dict =16 lowerCamelCase__ : List[Any] =12 lowerCamelCase__ : Optional[Any] =3 lowerCamelCase__ : Union[str, Any] =192 lowerCamelCase__ : str =768 elif "large" in model_name: lowerCamelCase__ : Union[str, Any] =1024 lowerCamelCase__ : str =4096 lowerCamelCase__ : int =24 lowerCamelCase__ : Dict =16 lowerCamelCase__ : Union[str, Any] =12 lowerCamelCase__ : List[Any] =8 lowerCamelCase__ : int =512 lowerCamelCase__ : Optional[Any] 
=2048 elif "huge" in model_name: lowerCamelCase__ : Optional[int] =1280 lowerCamelCase__ : Optional[int] =5120 lowerCamelCase__ : List[Any] =32 lowerCamelCase__ : List[Any] =16 lowerCamelCase__ : Optional[Any] =12 lowerCamelCase__ : Dict =8 lowerCamelCase__ : List[Any] =640 lowerCamelCase__ : Any =2560 elif "base" not in model_name: raise ValueError('''Model name should include either "small", "base", "large", or "huge"''' ) def snake_case__ ( __lowerCamelCase : Any ): """simple docstring""" if "encoder." in name: lowerCamelCase__ : Optional[int] =name.replace('''encoder.''' , '''''' ) if "cls_token" in name: lowerCamelCase__ : List[Any] =name.replace('''cls_token''' , '''videomae.embeddings.cls_token''' ) if "decoder_pos_embed" in name: lowerCamelCase__ : Tuple =name.replace('''decoder_pos_embed''' , '''decoder.decoder_pos_embed''' ) if "pos_embed" in name and "decoder" not in name: lowerCamelCase__ : Any =name.replace('''pos_embed''' , '''videomae.embeddings.position_embeddings''' ) if "patch_embed.proj" in name: lowerCamelCase__ : Optional[Any] =name.replace('''patch_embed.proj''' , '''videomae.embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: lowerCamelCase__ : List[Any] =name.replace('''patch_embed.norm''' , '''videomae.embeddings.norm''' ) if "decoder.blocks" in name: lowerCamelCase__ : Tuple =name.replace('''decoder.blocks''' , '''decoder.decoder_layers''' ) if "blocks" in name: lowerCamelCase__ : Dict =name.replace('''blocks''' , '''videomae.encoder.layer''' ) if "attn.proj" in name: lowerCamelCase__ : Union[str, Any] =name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name and "bias" not in name: lowerCamelCase__ : List[str] =name.replace('''attn''' , '''attention.self''' ) if "attn" in name: lowerCamelCase__ : Union[str, Any] =name.replace('''attn''' , '''attention.attention''' ) if "norm1" in name: lowerCamelCase__ : Tuple =name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: 
lowerCamelCase__ : Optional[int] =name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: lowerCamelCase__ : List[Any] =name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: lowerCamelCase__ : int =name.replace('''mlp.fc2''' , '''output.dense''' ) if "decoder_embed" in name: lowerCamelCase__ : Any =name.replace('''decoder_embed''' , '''decoder.decoder_embed''' ) if "decoder_norm" in name: lowerCamelCase__ : Optional[Any] =name.replace('''decoder_norm''' , '''decoder.decoder_norm''' ) if "decoder_pred" in name: lowerCamelCase__ : Any =name.replace('''decoder_pred''' , '''decoder.decoder_pred''' ) if "norm.weight" in name and "decoder" not in name and "fc" not in name: lowerCamelCase__ : str =name.replace('''norm.weight''' , '''videomae.layernorm.weight''' ) if "norm.bias" in name and "decoder" not in name and "fc" not in name: lowerCamelCase__ : Optional[int] =name.replace('''norm.bias''' , '''videomae.layernorm.bias''' ) if "head" in name and "decoder" not in name: lowerCamelCase__ : List[str] =name.replace('''head''' , '''classifier''' ) return name def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : int ): """simple docstring""" for key in orig_state_dict.copy().keys(): lowerCamelCase__ : Dict =orig_state_dict.pop(__lowerCamelCase ) if key.startswith('''encoder.''' ): lowerCamelCase__ : Optional[int] =key.replace('''encoder.''' , '''''' ) if "qkv" in key: lowerCamelCase__ : Any =key.split('''.''' ) if key.startswith('''decoder.blocks''' ): lowerCamelCase__ : Tuple =config.decoder_hidden_size lowerCamelCase__ : str =int(key_split[2] ) lowerCamelCase__ : Any ='''decoder.decoder_layers.''' if "weight" in key: lowerCamelCase__ : List[Any] =val[:dim, :] lowerCamelCase__ : Any =val[dim : dim * 2, :] lowerCamelCase__ : Dict =val[-dim:, :] else: lowerCamelCase__ : Optional[Any] =config.hidden_size lowerCamelCase__ : Optional[Any] =int(key_split[1] ) lowerCamelCase__ : str ='''videomae.encoder.layer.''' if "weight" in 
key: lowerCamelCase__ : int =val[:dim, :] lowerCamelCase__ : Tuple =val[dim : dim * 2, :] lowerCamelCase__ : List[Any] =val[-dim:, :] else: lowerCamelCase__ : int =val return orig_state_dict def snake_case__ ( ): """simple docstring""" lowerCamelCase__ : List[Any] =hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' ) lowerCamelCase__ : Optional[Any] =np.load(__lowerCamelCase ) return list(__lowerCamelCase ) def snake_case__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] ): """simple docstring""" lowerCamelCase__ : str =get_videomae_config(__lowerCamelCase ) if "finetuned" in model_name: lowerCamelCase__ : Tuple =VideoMAEForVideoClassification(__lowerCamelCase ) else: lowerCamelCase__ : int =VideoMAEForPreTraining(__lowerCamelCase ) # download original checkpoint, hosted on Google Drive lowerCamelCase__ : Union[str, Any] ='''pytorch_model.bin''' gdown.cached_download(__lowerCamelCase , __lowerCamelCase , quiet=__lowerCamelCase ) lowerCamelCase__ : Optional[Any] =torch.load(__lowerCamelCase , map_location='''cpu''' ) if "model" in files: lowerCamelCase__ : Dict =files['''model'''] else: lowerCamelCase__ : str =files['''module'''] lowerCamelCase__ : Optional[Any] =convert_state_dict(__lowerCamelCase , __lowerCamelCase ) model.load_state_dict(__lowerCamelCase ) model.eval() # verify model on basic input lowerCamelCase__ : Dict =VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) lowerCamelCase__ : int =prepare_video() lowerCamelCase__ : Tuple =image_processor(__lowerCamelCase , return_tensors='''pt''' ) if "finetuned" not in model_name: lowerCamelCase__ : Tuple =hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' ) lowerCamelCase__ : Union[str, Any] =torch.load(__lowerCamelCase ) lowerCamelCase__ : int 
=model(**__lowerCamelCase ) lowerCamelCase__ : Dict =outputs.logits lowerCamelCase__ : List[str] =[ '''videomae-small-finetuned-kinetics''', '''videomae-small-finetuned-ssv2''', # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600) '''videomae-base-short''', '''videomae-base-short-finetuned-kinetics''', '''videomae-base''', '''videomae-base-finetuned-kinetics''', '''videomae-large''', '''videomae-large-finetuned-kinetics''', '''videomae-huge-finetuned-kinetics''', # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400) '''videomae-base-short-ssv2''', '''videomae-base-short-finetuned-ssv2''', '''videomae-base-ssv2''', '''videomae-base-finetuned-ssv2''', ] # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5] if model_name == "videomae-small-finetuned-kinetics": lowerCamelCase__ : Union[str, Any] =torch.Size([1, 400] ) lowerCamelCase__ : str =torch.tensor([-0.92_91, -0.40_61, -0.93_07] ) elif model_name == "videomae-small-finetuned-ssv2": lowerCamelCase__ : int =torch.Size([1, 174] ) lowerCamelCase__ : Dict =torch.tensor([0.26_71, -0.46_89, -0.82_35] ) elif model_name == "videomae-base": lowerCamelCase__ : List[str] =torch.Size([1, 1408, 1536] ) lowerCamelCase__ : Dict =torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]] ) elif model_name == "videomae-base-short": lowerCamelCase__ : List[Any] =torch.Size([1, 1408, 1536] ) lowerCamelCase__ : List[str] =torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] ) # we verified the loss both for normalized and unnormalized targets for this one lowerCamelCase__ : str =torch.tensor([0.51_42] ) if config.norm_pix_loss else torch.tensor([0.64_69] ) elif model_name == "videomae-large": lowerCamelCase__ : Union[str, Any] =torch.Size([1, 1408, 1536] ) lowerCamelCase__ : List[Any] =torch.tensor([[0.71_49, 0.79_97, 0.69_66], 
[0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]] ) elif model_name == "videomae-large-finetuned-kinetics": lowerCamelCase__ : Any =torch.Size([1, 400] ) lowerCamelCase__ : str =torch.tensor([0.07_71, 0.00_11, -0.36_25] ) elif model_name == "videomae-huge-finetuned-kinetics": lowerCamelCase__ : Any =torch.Size([1, 400] ) lowerCamelCase__ : Optional[int] =torch.tensor([0.24_33, 0.16_32, -0.48_94] ) elif model_name == "videomae-base-short-finetuned-kinetics": lowerCamelCase__ : List[str] =torch.Size([1, 400] ) lowerCamelCase__ : Dict =torch.tensor([0.65_88, 0.09_90, -0.24_93] ) elif model_name == "videomae-base-finetuned-kinetics": lowerCamelCase__ : str =torch.Size([1, 400] ) lowerCamelCase__ : Any =torch.tensor([0.36_69, -0.06_88, -0.24_21] ) elif model_name == "videomae-base-short-ssv2": lowerCamelCase__ : Tuple =torch.Size([1, 1408, 1536] ) lowerCamelCase__ : Dict =torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]] ) elif model_name == "videomae-base-short-finetuned-ssv2": lowerCamelCase__ : Optional[int] =torch.Size([1, 174] ) lowerCamelCase__ : Any =torch.tensor([-0.05_37, -0.15_39, -0.32_66] ) elif model_name == "videomae-base-ssv2": lowerCamelCase__ : Dict =torch.Size([1, 1408, 1536] ) lowerCamelCase__ : str =torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]] ) elif model_name == "videomae-base-finetuned-ssv2": lowerCamelCase__ : str =torch.Size([1, 174] ) lowerCamelCase__ : int =torch.tensor([0.19_61, -0.83_37, -0.63_89] ) else: raise ValueError(f'''Model name not supported. 
Should be one of {model_names}''' ) # verify logits assert logits.shape == expected_shape if "finetuned" in model_name: assert torch.allclose(logits[0, :3] , __lowerCamelCase , atol=1e-4 ) else: print('''Logits:''' , logits[0, :3, :3] ) assert torch.allclose(logits[0, :3, :3] , __lowerCamelCase , atol=1e-4 ) print('''Logits ok!''' ) # verify loss, if applicable if model_name == "videomae-base-short": lowerCamelCase__ : str =outputs.loss assert torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-4 ) print('''Loss ok!''' ) if pytorch_dump_folder_path is not None: print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__lowerCamelCase ) model.save_pretrained(__lowerCamelCase ) if push_to_hub: print('''Pushing to the hub...''' ) model.push_to_hub(__lowerCamelCase , organization='''nielsr''' ) if __name__ == "__main__": _lowercase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&amp;export=download&amp;confirm=t&amp;uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4", type=str, help=( "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct" " download link." ), ) parser.add_argument( "--pytorch_dump_folder_path", default="/Users/nielsrogge/Documents/VideoMAE/Test", type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) _lowercase : Union[str, Any] = parser.parse_args() convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
625
1
"""simple docstring""" from __future__ import annotations _lowercase : Dict = tuple[int, int, int] _lowercase : Optional[Any] = tuple[str, str, str] # used alphabet -------------------------- # from string.ascii_uppercase _lowercase : Tuple = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" # -------------------------- default selection -------------------------- # rotors -------------------------- _lowercase : Optional[Any] = "EGZWVONAHDCLFQMSIPJBYUKXTR" _lowercase : List[Any] = "FOBHMDKEXQNRAULPGSJVTYICZW" _lowercase : Tuple = "ZJXESIUQLHAVRMDOYGTNFWPBKC" # reflector -------------------------- _lowercase : Any = { "A": "N", "N": "A", "B": "O", "O": "B", "C": "P", "P": "C", "D": "Q", "Q": "D", "E": "R", "R": "E", "F": "S", "S": "F", "G": "T", "T": "G", "H": "U", "U": "H", "I": "V", "V": "I", "J": "W", "W": "J", "K": "X", "X": "K", "L": "Y", "Y": "L", "M": "Z", "Z": "M", } # -------------------------- extra rotors -------------------------- _lowercase : Dict = "RMDJXFUWGISLHVTCQNKYPBEZOA" _lowercase : Optional[int] = "SGLCPQWZHKXAREONTFBVIYJUDM" _lowercase : List[Any] = "HVSICLTYKQUBXDWAJZOMFGPREN" _lowercase : Tuple = "RZWQHFMVDBKICJLNTUXAGYPSOE" _lowercase : List[str] = "LFKIJODBEGAMQPXVUHYSTCZRWN" _lowercase : Optional[Any] = "KOAEGVDHXPQZMLFTYWJNBRCIUS" def snake_case__ ( __lowerCamelCase : RotorPositionT , __lowerCamelCase : RotorSelectionT , __lowerCamelCase : str ): """simple docstring""" # Checks if there are 3 unique rotors if (unique_rotsel := len(set(__lowerCamelCase ) )) < 3: lowerCamelCase__ : List[Any] =f'''Please use 3 unique rotors (not {unique_rotsel})''' raise Exception(__lowerCamelCase ) # Checks if rotor positions are valid lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any =rotpos if not 0 < rotorposa <= len(__lowerCamelCase ): lowerCamelCase__ : str =f'''First rotor position is not within range of 1..26 ({rotorposa}''' raise ValueError(__lowerCamelCase ) if not 0 < rotorposa <= len(__lowerCamelCase ): lowerCamelCase__ : List[Any] =f'''Second rotor 
position is not within range of 1..26 ({rotorposa})''' raise ValueError(__lowerCamelCase ) if not 0 < rotorposa <= len(__lowerCamelCase ): lowerCamelCase__ : Union[str, Any] =f'''Third rotor position is not within range of 1..26 ({rotorposa})''' raise ValueError(__lowerCamelCase ) # Validates string and returns dict lowerCamelCase__ : Dict =_plugboard(__lowerCamelCase ) return rotpos, rotsel, pbdict def snake_case__ ( __lowerCamelCase : str ): """simple docstring""" # tests the input string if it # a) is type string # b) has even length (so pairs can be made) if not isinstance(__lowerCamelCase , __lowerCamelCase ): lowerCamelCase__ : str =f'''Plugboard setting isn\'t type string ({type(__lowerCamelCase )})''' raise TypeError(__lowerCamelCase ) elif len(__lowerCamelCase ) % 2 != 0: lowerCamelCase__ : Any =f'''Odd number of symbols ({len(__lowerCamelCase )})''' raise Exception(__lowerCamelCase ) elif pbstring == "": return {} pbstring.replace(''' ''' , '''''' ) # Checks if all characters are unique lowerCamelCase__ : str =set() for i in pbstring: if i not in abc: lowerCamelCase__ : Optional[int] =f'''\'{i}\' not in list of symbols''' raise Exception(__lowerCamelCase ) elif i in tmppbl: lowerCamelCase__ : Optional[int] =f'''Duplicate symbol ({i})''' raise Exception(__lowerCamelCase ) else: tmppbl.add(__lowerCamelCase ) del tmppbl # Created the dictionary lowerCamelCase__ : Dict ={} for j in range(0 , len(__lowerCamelCase ) - 1 , 2 ): lowerCamelCase__ : List[str] =pbstring[j + 1] lowerCamelCase__ : Dict =pbstring[j] return pb def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : RotorPositionT , __lowerCamelCase : RotorSelectionT = (rotora, rotora, rotora) , __lowerCamelCase : str = "" , ): """simple docstring""" lowerCamelCase__ : Union[str, Any] =text.upper() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =_validator( __lowerCamelCase , __lowerCamelCase , plugb.upper() ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any 
=rotor_position lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict =rotor_selection rotorposa -= 1 rotorposa -= 1 rotorposa -= 1 lowerCamelCase__ : List[str] =[] # encryption/decryption process -------------------------- for symbol in text: if symbol in abc: # 1st plugboard -------------------------- if symbol in plugboard: lowerCamelCase__ : Tuple =plugboard[symbol] # rotor ra -------------------------- lowerCamelCase__ : Union[str, Any] =abc.index(__lowerCamelCase ) + rotorposa lowerCamelCase__ : Tuple =rotora[index % len(__lowerCamelCase )] # rotor rb -------------------------- lowerCamelCase__ : List[Any] =abc.index(__lowerCamelCase ) + rotorposa lowerCamelCase__ : Tuple =rotora[index % len(__lowerCamelCase )] # rotor rc -------------------------- lowerCamelCase__ : List[str] =abc.index(__lowerCamelCase ) + rotorposa lowerCamelCase__ : Optional[int] =rotora[index % len(__lowerCamelCase )] # reflector -------------------------- # this is the reason you don't need another machine to decipher lowerCamelCase__ : List[Any] =reflector[symbol] # 2nd rotors lowerCamelCase__ : List[Any] =abc[rotora.index(__lowerCamelCase ) - rotorposa] lowerCamelCase__ : List[str] =abc[rotora.index(__lowerCamelCase ) - rotorposa] lowerCamelCase__ : Union[str, Any] =abc[rotora.index(__lowerCamelCase ) - rotorposa] # 2nd plugboard if symbol in plugboard: lowerCamelCase__ : int =plugboard[symbol] # moves/resets rotor positions rotorposa += 1 if rotorposa >= len(__lowerCamelCase ): lowerCamelCase__ : str =0 rotorposa += 1 if rotorposa >= len(__lowerCamelCase ): lowerCamelCase__ : Union[str, Any] =0 rotorposa += 1 if rotorposa >= len(__lowerCamelCase ): lowerCamelCase__ : str =0 # else: # pass # Error could be also raised # raise ValueError( # 'Invalid symbol('+repr(symbol)+')') result.append(__lowerCamelCase ) return "".join(__lowerCamelCase ) if __name__ == "__main__": _lowercase : Any = "This is my Python script that emulates the Enigma machine from WWII." 
_lowercase : str = (1, 1, 1) _lowercase : Optional[Any] = "pictures" _lowercase : Tuple = (rotora, rotora, rotora) _lowercase : Any = enigma(message, rotor_pos, rotor_sel, pb) print("Encrypted message:", en) print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
625
"""simple docstring""" _lowercase : str = 0 # The first color of the flag. _lowercase : Dict = 1 # The second color of the flag. _lowercase : Tuple = 2 # The third color of the flag. _lowercase : Optional[int] = (red, white, blue) def snake_case__ ( __lowerCamelCase : list ): """simple docstring""" if not sequence: return [] if len(__lowerCamelCase ) == 1: return list(__lowerCamelCase ) lowerCamelCase__ : List[Any] =0 lowerCamelCase__ : Dict =len(__lowerCamelCase ) - 1 lowerCamelCase__ : Tuple =0 while mid <= high: if sequence[mid] == colors[0]: lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =sequence[mid], sequence[low] low += 1 mid += 1 elif sequence[mid] == colors[1]: mid += 1 elif sequence[mid] == colors[2]: lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =sequence[high], sequence[mid] high -= 1 else: lowerCamelCase__ : Dict =f'''The elements inside the sequence must contains only {colors} values''' raise ValueError(__lowerCamelCase ) return sequence if __name__ == "__main__": import doctest doctest.testmod() _lowercase : Optional[Any] = input("Enter numbers separated by commas:\n").strip() _lowercase : int = [int(item.strip()) for item in user_input.split(",")] print(f'{dutch_national_flag_sort(unsorted)}')
625
1
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification def snake_case__ ( __lowerCamelCase : str ): """simple docstring""" lowerCamelCase__ : List[str] =SwinvaConfig() lowerCamelCase__ : str =swinva_name.split('''_''' ) lowerCamelCase__ : Tuple =name_split[1] if "to" in name_split[3]: lowerCamelCase__ : int =int(name_split[3][-3:] ) else: lowerCamelCase__ : Any =int(name_split[3] ) if "to" in name_split[2]: lowerCamelCase__ : List[Any] =int(name_split[2][-2:] ) else: lowerCamelCase__ : str =int(name_split[2][6:] ) if model_size == "tiny": lowerCamelCase__ : List[str] =96 lowerCamelCase__ : Union[str, Any] =(2, 2, 6, 2) lowerCamelCase__ : Dict =(3, 6, 12, 24) elif model_size == "small": lowerCamelCase__ : int =96 lowerCamelCase__ : Tuple =(2, 2, 18, 2) lowerCamelCase__ : List[str] =(3, 6, 12, 24) elif model_size == "base": lowerCamelCase__ : Union[str, Any] =128 lowerCamelCase__ : str =(2, 2, 18, 2) lowerCamelCase__ : Optional[Any] =(4, 8, 16, 32) else: lowerCamelCase__ : Optional[Any] =192 lowerCamelCase__ : Optional[Any] =(2, 2, 18, 2) lowerCamelCase__ : int =(6, 12, 24, 48) if "to" in swinva_name: lowerCamelCase__ : Any =(12, 12, 12, 6) if ("22k" in swinva_name) and ("to" not in swinva_name): lowerCamelCase__ : Optional[int] =21841 lowerCamelCase__ : Dict ='''huggingface/label-files''' lowerCamelCase__ : Any ='''imagenet-22k-id2label.json''' lowerCamelCase__ : Optional[int] =json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) ) lowerCamelCase__ : int ={int(__lowerCamelCase ): v for k, v in idalabel.items()} lowerCamelCase__ : Optional[Any] =idalabel lowerCamelCase__ : Union[str, Any] ={v: k for k, v in idalabel.items()} else: lowerCamelCase__ : Optional[int] =1000 lowerCamelCase__ : Optional[Any] 
='''huggingface/label-files''' lowerCamelCase__ : int ='''imagenet-1k-id2label.json''' lowerCamelCase__ : Optional[Any] =json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) ) lowerCamelCase__ : str ={int(__lowerCamelCase ): v for k, v in idalabel.items()} lowerCamelCase__ : Union[str, Any] =idalabel lowerCamelCase__ : Tuple ={v: k for k, v in idalabel.items()} lowerCamelCase__ : List[str] =img_size lowerCamelCase__ : Optional[int] =num_classes lowerCamelCase__ : str =embed_dim lowerCamelCase__ : Tuple =depths lowerCamelCase__ : Dict =num_heads lowerCamelCase__ : str =window_size return config def snake_case__ ( __lowerCamelCase : Optional[Any] ): """simple docstring""" if "patch_embed.proj" in name: lowerCamelCase__ : List[Any] =name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: lowerCamelCase__ : Optional[int] =name.replace('''patch_embed.norm''' , '''embeddings.norm''' ) if "layers" in name: lowerCamelCase__ : Optional[int] ='''encoder.''' + name if "attn.proj" in name: lowerCamelCase__ : Optional[Any] =name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: lowerCamelCase__ : Tuple =name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: lowerCamelCase__ : int =name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: lowerCamelCase__ : str =name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: lowerCamelCase__ : str =name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: lowerCamelCase__ : List[str] =name.replace('''mlp.fc2''' , '''output.dense''' ) if "q_bias" in name: lowerCamelCase__ : int =name.replace('''q_bias''' , '''query.bias''' ) if "k_bias" in name: lowerCamelCase__ : List[Any] =name.replace('''k_bias''' , '''key.bias''' ) if "v_bias" in name: lowerCamelCase__ : Tuple =name.replace('''v_bias''' , '''value.bias''' ) if "cpb_mlp" in name: 
lowerCamelCase__ : List[str] =name.replace('''cpb_mlp''' , '''continuous_position_bias_mlp''' ) if name == "norm.weight": lowerCamelCase__ : Optional[Any] ='''layernorm.weight''' if name == "norm.bias": lowerCamelCase__ : List[Any] ='''layernorm.bias''' if "head" in name: lowerCamelCase__ : Dict =name.replace('''head''' , '''classifier''' ) else: lowerCamelCase__ : Tuple ='''swinv2.''' + name return name def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : List[Any] ): """simple docstring""" for key in orig_state_dict.copy().keys(): lowerCamelCase__ : Optional[Any] =orig_state_dict.pop(__lowerCamelCase ) if "mask" in key: continue elif "qkv" in key: lowerCamelCase__ : List[Any] =key.split('''.''' ) lowerCamelCase__ : Tuple =int(key_split[1] ) lowerCamelCase__ : Dict =int(key_split[3] ) lowerCamelCase__ : Optional[int] =model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: lowerCamelCase__ : List[str] =val[:dim, :] lowerCamelCase__ : Optional[int] =val[dim : dim * 2, :] lowerCamelCase__ : str =val[-dim:, :] else: lowerCamelCase__ : Dict =val[:dim] lowerCamelCase__ : List[str] =val[ dim : dim * 2 ] lowerCamelCase__ : Any =val[-dim:] else: lowerCamelCase__ : Dict =val return orig_state_dict def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : Tuple ): """simple docstring""" lowerCamelCase__ : Optional[Any] =timm.create_model(__lowerCamelCase , pretrained=__lowerCamelCase ) timm_model.eval() lowerCamelCase__ : Union[str, Any] =get_swinva_config(__lowerCamelCase ) lowerCamelCase__ : str =SwinvaForImageClassification(__lowerCamelCase ) model.eval() lowerCamelCase__ : List[Any] =convert_state_dict(timm_model.state_dict() , __lowerCamelCase ) model.load_state_dict(__lowerCamelCase ) lowerCamelCase__ : Dict ='''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCamelCase__ : List[Any] =AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swinva_name.replace('''_''' , '''-''' ) ) ) 
lowerCamelCase__ : List[Any] =Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw ) lowerCamelCase__ : Optional[Any] =image_processor(images=__lowerCamelCase , return_tensors='''pt''' ) lowerCamelCase__ : Union[str, Any] =timm_model(inputs['''pixel_values'''] ) lowerCamelCase__ : List[str] =model(**__lowerCamelCase ).logits assert torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3 ) print(f'''Saving model {swinva_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__lowerCamelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__lowerCamelCase ) model.push_to_hub( repo_path_or_name=Path(__lowerCamelCase , __lowerCamelCase ) , organization='''nandwalritik''' , commit_message='''Add model''' , ) if __name__ == "__main__": _lowercase : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--swinv2_name", default="swinv2_tiny_patch4_window8_256", type=str, help="Name of the Swinv2 timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) _lowercase : Union[str, Any] = parser.parse_args() convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
625
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' _a = StableUnCLIPImgaImgPipeline _a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS _a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS _a = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess _a = frozenset([] ) def snake_case ( self : List[str] )-> str: lowerCamelCase__ : Dict =32 lowerCamelCase__ : Optional[Any] =embedder_hidden_size # image encoding components lowerCamelCase__ : Dict =CLIPImageProcessor(crop_size=32, size=32 ) torch.manual_seed(0 ) lowerCamelCase__ : List[Any] =CLIPVisionModelWithProjection( CLIPVisionConfig( hidden_size=lowerCamelCase, projection_dim=lowerCamelCase, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ) ) # regular denoising components torch.manual_seed(0 ) lowerCamelCase__ : Optional[int] 
=StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase ) lowerCamelCase__ : Dict =DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' ) torch.manual_seed(0 ) lowerCamelCase__ : Optional[int] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) torch.manual_seed(0 ) lowerCamelCase__ : Tuple =CLIPTextModel( CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=lowerCamelCase, projection_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) ) torch.manual_seed(0 ) lowerCamelCase__ : Dict =UNetaDConditionModel( sample_size=32, in_channels=4, out_channels=4, down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D'''), up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D'''), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type='''projection''', projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=lowerCamelCase, layers_per_block=1, upcast_attention=lowerCamelCase, use_linear_projection=lowerCamelCase, ) torch.manual_seed(0 ) lowerCamelCase__ : Union[str, Any] =DDIMScheduler( beta_schedule='''scaled_linear''', beta_start=0.00_085, beta_end=0.012, prediction_type='''v_prediction''', set_alpha_to_one=lowerCamelCase, steps_offset=1, ) torch.manual_seed(0 ) lowerCamelCase__ : Optional[int] =AutoencoderKL() lowerCamelCase__ : int ={ # image encoding components '''feature_extractor''': feature_extractor, '''image_encoder''': image_encoder.eval(), # image noising components '''image_normalizer''': image_normalizer.eval(), '''image_noising_scheduler''': image_noising_scheduler, # regular denoising components '''tokenizer''': tokenizer, '''text_encoder''': text_encoder.eval(), '''unet''': unet.eval(), '''scheduler''': scheduler, '''vae''': vae.eval(), } return components def snake_case ( self : str, lowerCamelCase : Dict, lowerCamelCase : Any=0, lowerCamelCase : str=True )-> List[str]: if str(lowerCamelCase 
).startswith('''mps''' ): lowerCamelCase__ : List[Any] =torch.manual_seed(lowerCamelCase ) else: lowerCamelCase__ : Any =torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase ) lowerCamelCase__ : Dict =floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase ) if pil_image: lowerCamelCase__ : int =input_image * 0.5 + 0.5 lowerCamelCase__ : Dict =input_image.clamp(0, 1 ) lowerCamelCase__ : List[str] =input_image.cpu().permute(0, 2, 3, 1 ).float().numpy() lowerCamelCase__ : Dict =DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0] return { "prompt": "An anime racoon running a marathon", "image": input_image, "generator": generator, "num_inference_steps": 2, "output_type": "np", } @skip_mps def snake_case ( self : List[str] )-> Optional[Any]: lowerCamelCase__ : Dict ='''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCamelCase__ : str =self.get_dummy_components() lowerCamelCase__ : int =StableUnCLIPImgaImgPipeline(**lowerCamelCase ) lowerCamelCase__ : Any =sd_pipe.to(lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase ) lowerCamelCase__ : Dict =self.get_dummy_inputs(lowerCamelCase ) inputs.update({'''image_embeds''': None} ) lowerCamelCase__ : Any =sd_pipe(**lowerCamelCase ).images lowerCamelCase__ : List[Any] =image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCamelCase__ : Union[str, Any] =np.array([0.3_872, 0.7_224, 0.5_601, 0.4_741, 0.6_872, 0.5_814, 0.4_636, 0.3_867, 0.5_078] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def snake_case ( self : int )-> Tuple: lowerCamelCase__ : Tuple =torch_device in ['''cpu''', '''mps'''] self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase ) def snake_case ( self : int )-> Optional[Any]: lowerCamelCase__ : List[Any] =torch_device in ['''cpu''', '''mps'''] self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase ) @unittest.skipIf( torch_device != '''cuda''' or 
not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', ) def snake_case ( self : List[str] )-> List[str]: self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase ) @slow @require_torch_gpu class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def snake_case ( self : List[Any] )-> Dict: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case ( self : Optional[int] )-> int: lowerCamelCase__ : Tuple =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' ) lowerCamelCase__ : Optional[int] =load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''' ) lowerCamelCase__ : Optional[Any] =StableUnCLIPImgaImgPipeline.from_pretrained( '''fusing/stable-unclip-2-1-l-img2img''', torch_dtype=torch.floataa ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowerCamelCase__ : int =torch.Generator(device='''cpu''' ).manual_seed(0 ) lowerCamelCase__ : Any =pipe(lowerCamelCase, '''anime turle''', generator=lowerCamelCase, output_type='''np''' ) lowerCamelCase__ : List[Any] =output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase ) def snake_case ( self : Optional[int] )-> Tuple: lowerCamelCase__ : Any =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' ) lowerCamelCase__ : str =load_numpy( 
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''' ) lowerCamelCase__ : Optional[int] =StableUnCLIPImgaImgPipeline.from_pretrained( '''fusing/stable-unclip-2-1-h-img2img''', torch_dtype=torch.floataa ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowerCamelCase__ : str =torch.Generator(device='''cpu''' ).manual_seed(0 ) lowerCamelCase__ : Tuple =pipe(lowerCamelCase, '''anime turle''', generator=lowerCamelCase, output_type='''np''' ) lowerCamelCase__ : Tuple =output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase ) def snake_case ( self : Optional[int] )-> List[str]: lowerCamelCase__ : int =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' ) torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowerCamelCase__ : Any =StableUnCLIPImgaImgPipeline.from_pretrained( '''fusing/stable-unclip-2-1-h-img2img''', torch_dtype=torch.floataa ) lowerCamelCase__ : Optional[Any] =pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowerCamelCase__ : List[Any] =pipe( lowerCamelCase, '''anime turtle''', num_inference_steps=2, output_type='''np''', ) lowerCamelCase__ : Optional[int] =torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
625
1
"""simple docstring"""
from collections.abc import Callable

import numpy as np


def snake_case__(
    ode_func: Callable,
    ya: float,
    xa: float,
    step_size: float,
    x_end: float,
) -> np.ndarray:
    """Integrate dy/dx = ode_func(x, y) with the explicit (forward) Euler method.

    Args:
        ode_func: Right-hand side f(x, y) of the ODE.
        ya: Initial value y(xa).
        xa: Initial value of the independent variable.
        step_size: Fixed step size h > 0.
        x_end: Final x up to which to integrate.

    Returns:
        Array of n + 1 solution values, where n = ceil((x_end - xa) / step_size);
        index 0 holds the initial condition ``ya``.

    >>> # y' = y, y(0) = 1  ->  y[k] = 1.25**k for h = 0.25
    >>> y = snake_case__(lambda x, y: y, 1.0, 0.0, 0.25, 1.0)
    >>> float(y[-1])
    2.44140625
    """
    # Number of Euler steps needed to reach (at least) x_end.
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        # Forward-Euler update: y_{k+1} = y_k + h * f(x_k, y_k)
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
625
"""simple docstring"""


def snake_case__(n: int = 4000000) -> int:
    """Project Euler problem 2: sum of the even-valued Fibonacci terms.

    Args:
        n: Upper bound (inclusive) for the Fibonacci values considered.

    Returns:
        The sum of all even Fibonacci numbers not exceeding ``n``
        (4613732 for the default bound of four million).
    """
    even_fibs: list[int] = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        # Advance the Fibonacci pair in lockstep.
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{snake_case__() = }")
625
1
"""simple docstring"""
import argparse
import os

import torch

from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)

# UNet config for the tiny test checkpoints.
TEST_UNET_CONFIG = {
    "sample_size": 32,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": 1000,
    "block_out_channels": [32, 64],
    "attention_head_dim": 8,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

# UNet config for the ImageNet-64 consistency-model checkpoints.
IMAGENET_64_UNET_CONFIG = {
    "sample_size": 64,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 3,
    "num_class_embeds": 1000,
    "block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

# UNet config for the (unconditional) LSUN 256x256 checkpoints.
LSUN_256_UNET_CONFIG = {
    "sample_size": 256,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": None,
    "block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "default",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

# Scheduler configs, keyed by training regime (cd = consistency distillation,
# ct = consistency training); selected in __main__ from the checkpoint name.
CD_SCHEDULER_CONFIG = {
    "num_train_timesteps": 40,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_IMAGENET_64_SCHEDULER_CONFIG = {
    "num_train_timesteps": 201,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_LSUN_256_SCHEDULER_CONFIG = {
    "num_train_timesteps": 151,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}


def strabool(v):
    """Parse a CLI string (or bool) into a bool; raises for unrecognized values."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")


def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    """Copy one ResNet block's weights from the original checkpoint into
    diffusers naming under ``new_prefix``; ``has_skip`` adds the 1x1 shortcut."""
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint


def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim=None):
    """Copy one attention block, splitting the fused qkv projection into the
    separate to_q/to_k/to_v tensors diffusers expects (1x1 convs -> linear)."""
    # The original stores q, k, v stacked along dim 0 of a single conv weight.
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    # Drop the trailing 1x1 spatial dims so the conv weights become linear weights.
    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)
    return new_checkpoint


def con_pt_to_diffuser(checkpoint_path, unet_config):
    """Convert an OpenAI consistency-model ``.pt`` state dict (at
    ``checkpoint_path``) into a diffusers UNet2DModel state dict.

    Walks the down blocks, mid block and up blocks in the order implied by
    ``unet_config`` and remaps every tensor via convert_resnet/convert_attention.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    # input_blocks.0 is conv_in, so the first real block starts at index 1.
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        # A channel change means the first resnet of the block has a skip conv.
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        # Every down block except the last ends with a downsampler.
        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]
    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            # Up blocks carry one extra resnet (the skip connection to the
            # matching down block), hence layers_per_block + 1.
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    return new_checkpoint


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
    args = parser.parse_args()
    # argparse gives a string; normalize to a real bool before using it below.
    args.class_cond = strabool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")

    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
625
"""simple docstring""" from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class __SCREAMING_SNAKE_CASE : '''simple docstring''' _a = BlenderbotSmallConfig _a = {} _a = 'gelu' def __init__( self : Union[str, Any], lowerCamelCase : List[str], lowerCamelCase : Dict=13, lowerCamelCase : Optional[Any]=7, lowerCamelCase : Optional[int]=True, lowerCamelCase : int=False, lowerCamelCase : Union[str, Any]=99, lowerCamelCase : str=32, lowerCamelCase : List[Any]=2, lowerCamelCase : Optional[int]=4, lowerCamelCase : Union[str, Any]=37, lowerCamelCase : str=0.1, lowerCamelCase : Optional[int]=0.1, lowerCamelCase : Optional[Any]=20, lowerCamelCase : int=2, lowerCamelCase : Any=1, lowerCamelCase : Optional[Any]=0, )-> List[str]: lowerCamelCase__ : Any =parent lowerCamelCase__ : Dict =batch_size lowerCamelCase__ : Optional[int] =seq_length lowerCamelCase__ : Tuple =is_training lowerCamelCase__ : Dict =use_labels lowerCamelCase__ : List[Any] =vocab_size lowerCamelCase__ : str =hidden_size lowerCamelCase__ : str =num_hidden_layers lowerCamelCase__ : Union[str, Any] =num_attention_heads lowerCamelCase__ : Any =intermediate_size lowerCamelCase__ : Dict =hidden_dropout_prob lowerCamelCase__ : List[Any] =attention_probs_dropout_prob lowerCamelCase__ : str =max_position_embeddings lowerCamelCase__ : Optional[int] =eos_token_id lowerCamelCase__ : str =pad_token_id lowerCamelCase__ : Union[str, Any] =bos_token_id def snake_case ( self 
: Any )-> Any: lowerCamelCase__ : Any =ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size ) lowerCamelCase__ : Tuple =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 ) lowerCamelCase__ : Any =tf.concat([input_ids, eos_tensor], axis=1 ) lowerCamelCase__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) lowerCamelCase__ : int =self.config_cls( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, ) lowerCamelCase__ : Optional[int] =prepare_blenderbot_small_inputs_dict(lowerCamelCase, lowerCamelCase, lowerCamelCase ) return config, inputs_dict def snake_case ( self : Any, lowerCamelCase : str, lowerCamelCase : Any )-> Optional[Any]: lowerCamelCase__ : Union[str, Any] =TFBlenderbotSmallModel(config=lowerCamelCase ).get_decoder() lowerCamelCase__ : List[Any] =inputs_dict['''input_ids'''] lowerCamelCase__ : Optional[int] =input_ids[:1, :] lowerCamelCase__ : str =inputs_dict['''attention_mask'''][:1, :] lowerCamelCase__ : Union[str, Any] =inputs_dict['''head_mask'''] lowerCamelCase__ : Optional[Any] =1 # first forward pass lowerCamelCase__ : Dict =model(lowerCamelCase, attention_mask=lowerCamelCase, head_mask=lowerCamelCase, use_cache=lowerCamelCase ) lowerCamelCase__ , lowerCamelCase__ : List[str] =outputs.to_tuple() # create hypothetical next token and extent to next_input_ids lowerCamelCase__ : Union[str, Any] =ids_tensor((self.batch_size, 3), config.vocab_size ) 
lowerCamelCase__ : Tuple =tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta ) # append to next input_ids and lowerCamelCase__ : List[str] =tf.concat([input_ids, next_tokens], axis=-1 ) lowerCamelCase__ : str =tf.concat([attention_mask, next_attn_mask], axis=-1 ) lowerCamelCase__ : Optional[int] =model(lowerCamelCase, attention_mask=lowerCamelCase )[0] lowerCamelCase__ : Union[str, Any] =model(lowerCamelCase, attention_mask=lowerCamelCase, past_key_values=lowerCamelCase )[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] ) # select random slice lowerCamelCase__ : Tuple =int(ids_tensor((1,), output_from_past.shape[-1] ) ) lowerCamelCase__ : int =output_from_no_past[:, -3:, random_slice_idx] lowerCamelCase__ : List[str] =output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowerCamelCase, lowerCamelCase, rtol=1E-3 ) def snake_case__ ( __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Tuple=None , __lowerCamelCase : Dict=None , __lowerCamelCase : Optional[int]=None , ): """simple docstring""" if attention_mask is None: lowerCamelCase__ : List[str] =tf.cast(tf.math.not_equal(__lowerCamelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: lowerCamelCase__ : str =tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: lowerCamelCase__ : int =tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowerCamelCase__ : int =tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: lowerCamelCase__ : List[str] =tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, 
"decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' _a = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) _a = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () _a = ( { 'conversational': TFBlenderbotSmallForConditionalGeneration, 'feature-extraction': TFBlenderbotSmallModel, 'summarization': TFBlenderbotSmallForConditionalGeneration, 'text2text-generation': TFBlenderbotSmallForConditionalGeneration, 'translation': TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) _a = True _a = False _a = False def snake_case ( self : Any )-> str: lowerCamelCase__ : Tuple =TFBlenderbotSmallModelTester(self ) lowerCamelCase__ : Union[str, Any] =ConfigTester(self, config_class=lowerCamelCase ) def snake_case ( self : Any )-> Optional[int]: self.config_tester.run_common_tests() def snake_case ( self : int )-> str: lowerCamelCase__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowerCamelCase ) @require_tokenizers @require_tf class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' _a = [ 'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like ' ' i\'m going to throw up.\nand why is that?' 
] _a = 'facebook/blenderbot_small-90M' @cached_property def snake_case ( self : Any )-> List[Any]: # use "old" tokenizer here because of bug when downloading new tokenizer return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) @cached_property def snake_case ( self : int )-> List[Any]: lowerCamelCase__ : str =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def snake_case ( self : Tuple )-> int: lowerCamelCase__ : Dict =self.tokenizer(self.src_text, return_tensors='''tf''' ) lowerCamelCase__ : Any =self.model.generate( model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=lowerCamelCase, ) lowerCamelCase__ : Any =self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=lowerCamelCase )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
625
1
"""simple docstring"""
import numpy as np

import datasets


_DESCRIPTION = "\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n"

_CITATION = "\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n"

_KWARGS_DESCRIPTION = "\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {'mahalanobis': array([0.5])}\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __SCREAMING_SNAKE_CASE(datasets.Metric):
    """Metric computing the Mahalanobis distance between points and a reference distribution.

    Fixes relative to the previous revision: the three docstring constants were
    all assigned to ``_lowercase`` while being referenced as ``_DESCRIPTION`` /
    ``_CITATION`` / ``_KWARGS_DESCRIPTION`` (NameError at import), and the
    compute method declared two parameters with the same name (SyntaxError)
    while its body read ``X`` and ``reference_distribution``. Names are restored
    to match the body's own usage and the docstring example above. The two
    methods were also both named ``snake_case`` (the second shadowed the
    first); they are restored to the ``datasets.Metric`` hooks ``_info`` /
    ``_compute`` that the base class's public ``compute()`` dispatches to.
    """

    def _info(self):
        # Metric metadata and declared input features.
        # NOTE(review): only ``X`` is declared here even though ``_compute``
        # also consumes ``reference_distribution`` — mirrors the upstream
        # metric; confirm whether the second feature should be declared too.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        """Return ``{"mahalanobis": distances}`` for each row of ``X``.

        Args:
            X: 2D array-like of datapoints to score.
            reference_distribution: 2D array-like (>= 2 rows) defining the
                reference distribution.

        Raises:
            ValueError: if either input is not 2D, or the reference
                distribution has fewer than two rows (covariance undefined).
        """
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Validate shapes before any linear algebra.
        if len(X.shape) != 2:
            raise ValueError('''Expected `X` to be a 2D vector''')
        if len(reference_distribution.shape) != 2:
            raise ValueError('''Expected `reference_distribution` to be a 2D vector''')
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                '''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension'''
            )

        # Center X on the reference distribution's mean.
        # NOTE(review): np.mean over the *flattened* reference distribution (a
        # scalar), not a per-feature mean (axis=0) — kept as in the original
        # code, but the textbook definition centers per feature; confirm intent.
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            # Singular covariance: fall back to the Moore-Penrose pseudo-inverse.
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
        return {"mahalanobis": mahal_dist}
625
"""simple docstring""" def snake_case__ ( __lowerCamelCase : list[list[int]] , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : list[int] ): """simple docstring""" # 1. Validate that path exists between current and next vertices if graph[path[curr_ind - 1]][next_ver] == 0: return False # 2. Validate that next vertex is not already in path return not any(vertex == next_ver for vertex in path ) def snake_case__ ( __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[int] , __lowerCamelCase : int ): """simple docstring""" # Base Case if curr_ind == len(__lowerCamelCase ): # return whether path exists between current and starting vertices return graph[path[curr_ind - 1]][path[0]] == 1 # Recursive Step for next_ver in range(0 , len(__lowerCamelCase ) ): if valid_connection(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ): # Insert current vertex into path as next transition lowerCamelCase__ : Tuple =next_ver # Validate created path if util_hamilton_cycle(__lowerCamelCase , __lowerCamelCase , curr_ind + 1 ): return True # Backtrack lowerCamelCase__ : int =-1 return False def snake_case__ ( __lowerCamelCase : list[list[int]] , __lowerCamelCase : int = 0 ): """simple docstring""" lowerCamelCase__ : Tuple =[-1] * (len(__lowerCamelCase ) + 1) # initialize start and end of path with starting index lowerCamelCase__ : Union[str, Any] =start_index # evaluate and if we find answer return path either return empty array return path if util_hamilton_cycle(__lowerCamelCase , __lowerCamelCase , 1 ) else []
625
1
"""simple docstring""" import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse("0.8.3"): raise Exception("requires gluonnlp == 0.8.3") if version.parse(mx.__version__) != version.parse("1.5.0"): raise Exception("requires mxnet == 1.5.0") logging.set_verbosity_info() _lowercase : str = logging.get_logger(__name__) _lowercase : Optional[Any] = "The Nymphenburg Palace is a beautiful palace in Munich!" def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : str ): """simple docstring""" lowerCamelCase__ : List[Any] ={ '''attention_cell''': '''multi_head''', '''num_layers''': 4, '''units''': 1024, '''hidden_size''': 768, '''max_length''': 512, '''num_heads''': 8, '''scaled''': True, '''dropout''': 0.1, '''use_residual''': True, '''embed_size''': 1024, '''embed_dropout''': 0.1, '''word_embed''': None, '''layer_norm_eps''': 1e-5, '''token_type_vocab_size''': 2, } lowerCamelCase__ : List[str] =bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py lowerCamelCase__ : int =BERTEncoder( attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , 
dropout=predefined_args['''dropout'''] , output_attention=__lowerCamelCase , output_all_encodings=__lowerCamelCase , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , __lowerCamelCase ) , ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later lowerCamelCase__ : Optional[Any] ='''openwebtext_ccnews_stories_books_cased''' # Specify download folder to Gluonnlp's vocab lowerCamelCase__ : Union[str, Any] =os.path.join(get_home_dir() , '''models''' ) lowerCamelCase__ : Any =_load_vocab(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , cls=__lowerCamelCase ) lowerCamelCase__ : Any =nlp.model.BERTModel( __lowerCamelCase , len(__lowerCamelCase ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=__lowerCamelCase , use_token_type_embed=__lowerCamelCase , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=__lowerCamelCase , use_decoder=__lowerCamelCase , ) original_bort.load_parameters(__lowerCamelCase , cast_dtype=__lowerCamelCase , ignore_extra=__lowerCamelCase ) lowerCamelCase__ : Optional[int] =original_bort._collect_params_with_prefix() # Build our config 🤗 lowerCamelCase__ : Union[str, Any] ={ '''architectures''': ['''BertForMaskedLM'''], '''attention_probs_dropout_prob''': predefined_args['''dropout'''], '''hidden_act''': '''gelu''', '''hidden_dropout_prob''': predefined_args['''dropout'''], '''hidden_size''': predefined_args['''embed_size'''], '''initializer_range''': 0.02, '''intermediate_size''': predefined_args['''hidden_size'''], '''layer_norm_eps''': predefined_args['''layer_norm_eps'''], '''max_position_embeddings''': predefined_args['''max_length'''], '''model_type''': '''bort''', 
'''num_attention_heads''': predefined_args['''num_heads'''], '''num_hidden_layers''': predefined_args['''num_layers'''], '''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa '''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa '''vocab_size''': len(__lowerCamelCase ), } lowerCamelCase__ : int =BertConfig.from_dict(__lowerCamelCase ) lowerCamelCase__ : Dict =BertForMaskedLM(__lowerCamelCase ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | 
`bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(__lowerCamelCase : Dict ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(__lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] ): lowerCamelCase__ : Union[str, Any] =hf_param.shape lowerCamelCase__ : Optional[int] =to_torch(params[gluon_param] ) lowerCamelCase__ : Optional[int] =gluon_param.shape assert ( shape_hf == shape_gluon ), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers''' return gluon_param lowerCamelCase__ : Union[str, Any] =check_and_map_params( hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' ) lowerCamelCase__ : Tuple =check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' ) lowerCamelCase__ : Dict =check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' ) lowerCamelCase__ : Union[str, Any] =check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) lowerCamelCase__ : Union[str, Any] =torch.zeros_like( 
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): lowerCamelCase__ : BertLayer =hf_bort_model.bert.encoder.layer[i] # self attention lowerCamelCase__ : BertSelfAttention =layer.attention.self lowerCamelCase__ : Optional[Any] =check_and_map_params( self_attn.key.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' ) lowerCamelCase__ : Optional[int] =check_and_map_params( self_attn.key.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' ) lowerCamelCase__ : List[Any] =check_and_map_params( self_attn.query.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' ) lowerCamelCase__ : Optional[Any] =check_and_map_params( self_attn.query.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' ) lowerCamelCase__ : str =check_and_map_params( self_attn.value.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' ) lowerCamelCase__ : List[str] =check_and_map_params( self_attn.value.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' ) # self attention output lowerCamelCase__ : BertSelfOutput =layer.attention.output lowerCamelCase__ : Dict =check_and_map_params( self_output.dense.bias , f'''encoder.transformer_cells.{i}.proj.bias''' ) lowerCamelCase__ : Any =check_and_map_params( self_output.dense.weight , f'''encoder.transformer_cells.{i}.proj.weight''' ) lowerCamelCase__ : str =check_and_map_params( self_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.layer_norm.beta''' ) lowerCamelCase__ : Tuple =check_and_map_params( self_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.layer_norm.gamma''' ) # intermediate lowerCamelCase__ : BertIntermediate =layer.intermediate lowerCamelCase__ : Union[str, Any] =check_and_map_params( intermediate.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' ) lowerCamelCase__ : Any 
=check_and_map_params( intermediate.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' ) # output lowerCamelCase__ : BertOutput =layer.output lowerCamelCase__ : int =check_and_map_params( bert_output.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' ) lowerCamelCase__ : Any =check_and_map_params( bert_output.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' ) lowerCamelCase__ : Optional[Any] =check_and_map_params( bert_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' ) lowerCamelCase__ : Union[str, Any] =check_and_map_params( bert_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models lowerCamelCase__ : Dict =RobertaTokenizer.from_pretrained('''roberta-base''' ) lowerCamelCase__ : Tuple =tokenizer.encode_plus(__lowerCamelCase )['''input_ids'''] # Get gluon output lowerCamelCase__ : Optional[Any] =mx.nd.array([input_ids] ) lowerCamelCase__ : Optional[Any] =original_bort(inputs=__lowerCamelCase , token_types=[] ) # Get Transformer output (save and reload model again) hf_bort_model.save_pretrained(__lowerCamelCase ) lowerCamelCase__ : int =BertModel.from_pretrained(__lowerCamelCase ) hf_bort_model.eval() lowerCamelCase__ : Union[str, Any] =tokenizer.encode_plus(__lowerCamelCase , return_tensors='''pt''' ) lowerCamelCase__ : Any =hf_bort_model(**__lowerCamelCase )[0] lowerCamelCase__ : Dict =output_gluon[0].asnumpy() lowerCamelCase__ : Union[str, Any] =output_hf[0].detach().numpy() lowerCamelCase__ : List[Any] =np.max(np.abs(hf_layer - gluon_layer ) ).item() lowerCamelCase__ : List[Any] =np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3 ) if success: print('''✔️ Both model do output the same tensors''' ) else: print('''❌ Both model do **NOT** output the same tensors''' ) print('''Absolute difference is:''' , __lowerCamelCase ) if __name__ == "__main__": _lowercase : 
List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) _lowercase : List[str] = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
625
"""simple docstring""" import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin _lowercase : List[str] = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right _lowercase : List[str] = 2_5_0_0_0_4 _lowercase : Optional[Any] = 2_5_0_0_2_0 @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' _a = MBartTokenizer _a = MBartTokenizerFast _a = True _a = True def snake_case ( self : Tuple )-> Union[str, Any]: super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase__ : Union[str, Any] =MBartTokenizer(lowerCamelCase, keep_accents=lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case ( self : Dict )-> Union[str, Any]: lowerCamelCase__ : Any =MBartTokenizer(lowerCamelCase, keep_accents=lowerCamelCase ) lowerCamelCase__ : List[Any] =tokenizer.tokenize('''This is a test''' ) self.assertListEqual(lowerCamelCase, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCamelCase ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], ) lowerCamelCase__ : str =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowerCamelCase, [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', 
'''al''', '''s''', '''é''', '''.''', ], ) lowerCamelCase__ : Union[str, Any] =tokenizer.convert_tokens_to_ids(lowerCamelCase ) self.assertListEqual( lowerCamelCase, [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ], ) lowerCamelCase__ : str =tokenizer.convert_ids_to_tokens(lowerCamelCase ) self.assertListEqual( lowerCamelCase, [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ], ) def snake_case ( self : Tuple )-> List[Any]: if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return lowerCamelCase__ : int =(self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowerCamelCase__ : Union[str, Any] =self.rust_tokenizer_class.from_pretrained(lowerCamelCase, **lowerCamelCase ) lowerCamelCase__ : str =self.tokenizer_class.from_pretrained(lowerCamelCase, **lowerCamelCase ) lowerCamelCase__ : List[str] =tempfile.mkdtemp() lowerCamelCase__ : Union[str, Any] =tokenizer_r.save_pretrained(lowerCamelCase ) lowerCamelCase__ : Optional[int] =tokenizer_p.save_pretrained(lowerCamelCase ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) lowerCamelCase__ : List[str] =tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(lowerCamelCase, lowerCamelCase ) # Checks 
everything loads correctly in the same way lowerCamelCase__ : Any =tokenizer_r.from_pretrained(lowerCamelCase ) lowerCamelCase__ : Dict =tokenizer_p.from_pretrained(lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(lowerCamelCase ) # Save tokenizer rust, legacy_format=True lowerCamelCase__ : Dict =tempfile.mkdtemp() lowerCamelCase__ : List[str] =tokenizer_r.save_pretrained(lowerCamelCase, legacy_format=lowerCamelCase ) lowerCamelCase__ : Tuple =tokenizer_p.save_pretrained(lowerCamelCase ) # Checks it save with the same files self.assertSequenceEqual(lowerCamelCase, lowerCamelCase ) # Checks everything loads correctly in the same way lowerCamelCase__ : Optional[int] =tokenizer_r.from_pretrained(lowerCamelCase ) lowerCamelCase__ : Any =tokenizer_p.from_pretrained(lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) ) shutil.rmtree(lowerCamelCase ) # Save tokenizer rust, legacy_format=False lowerCamelCase__ : Optional[int] =tempfile.mkdtemp() lowerCamelCase__ : int =tokenizer_r.save_pretrained(lowerCamelCase, legacy_format=lowerCamelCase ) lowerCamelCase__ : Dict =tokenizer_p.save_pretrained(lowerCamelCase ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way lowerCamelCase__ : Dict =tokenizer_r.from_pretrained(lowerCamelCase ) lowerCamelCase__ : int =tokenizer_p.from_pretrained(lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: 
self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) ) shutil.rmtree(lowerCamelCase ) @require_torch @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' _a = 'facebook/mbart-large-en-ro' _a = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.', ] _a = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei' ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] _a = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE] @classmethod def snake_case ( cls : List[Any] )-> Optional[int]: lowerCamelCase__ : MBartTokenizer =MBartTokenizer.from_pretrained( cls.checkpoint_name, src_lang='''en_XX''', tgt_lang='''ro_RO''' ) lowerCamelCase__ : Optional[int] =1 return cls def snake_case ( self : Optional[Any] )-> List[str]: self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''], 25_0001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''], 25_0004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''], 25_0020 ) def snake_case ( self : Optional[int] )-> List[Any]: lowerCamelCase__ : Union[str, Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens, lowerCamelCase ) def snake_case ( self : Optional[Any] )-> str: self.assertIn(lowerCamelCase, self.tokenizer.all_special_ids ) lowerCamelCase__ : Optional[int] =[RO_CODE, 884, 9019, 96, 9, 916, 
8_6792, 36, 1_8743, 1_5596, 5, 2] lowerCamelCase__ : Any =self.tokenizer.decode(lowerCamelCase, skip_special_tokens=lowerCamelCase ) lowerCamelCase__ : str =self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowerCamelCase ) self.assertEqual(lowerCamelCase, lowerCamelCase ) self.assertNotIn(self.tokenizer.eos_token, lowerCamelCase ) def snake_case ( self : Tuple )-> int: lowerCamelCase__ : Optional[int] =['''this is gunna be a long sentence ''' * 20] assert isinstance(src_text[0], lowerCamelCase ) lowerCamelCase__ : Dict =10 lowerCamelCase__ : Optional[int] =self.tokenizer(lowerCamelCase, max_length=lowerCamelCase, truncation=lowerCamelCase ).input_ids[0] self.assertEqual(ids[-2], 2 ) self.assertEqual(ids[-1], lowerCamelCase ) self.assertEqual(len(lowerCamelCase ), lowerCamelCase ) def snake_case ( self : int )-> Any: self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ), [25_0026, 25_0001] ) def snake_case ( self : Tuple )-> Optional[Any]: lowerCamelCase__ : int =tempfile.mkdtemp() lowerCamelCase__ : Optional[int] =self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(lowerCamelCase ) lowerCamelCase__ : Optional[Any] =MBartTokenizer.from_pretrained(lowerCamelCase ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids, lowerCamelCase ) @require_torch def snake_case ( self : Optional[Any] )-> Tuple: lowerCamelCase__ : Optional[Any] =self.tokenizer(self.src_text, text_target=self.tgt_text, padding=lowerCamelCase, return_tensors='''pt''' ) lowerCamelCase__ : Dict =shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def snake_case ( self : Optional[Any] )-> Any: lowerCamelCase__ : str 
=self.tokenizer( self.src_text, text_target=self.tgt_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=len(self.expected_src_tokens ), return_tensors='''pt''', ) lowerCamelCase__ : List[Any] =shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id ) self.assertIsInstance(lowerCamelCase, lowerCamelCase ) self.assertEqual((2, 14), batch.input_ids.shape ) self.assertEqual((2, 14), batch.attention_mask.shape ) lowerCamelCase__ : Any =batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens, lowerCamelCase ) self.assertEqual(2, batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens, [] ) self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE] ) def snake_case ( self : List[Any] )-> Dict: lowerCamelCase__ : Any =self.tokenizer(self.src_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=3, return_tensors='''pt''' ) lowerCamelCase__ : Tuple =self.tokenizer( text_target=self.tgt_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=10, return_tensors='''pt''' ) lowerCamelCase__ : Union[str, Any] =targets['''input_ids'''] lowerCamelCase__ : List[Any] =shift_tokens_right(lowerCamelCase, self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1], 3 ) self.assertEqual(batch.decoder_input_ids.shape[1], 10 ) @require_torch def snake_case ( self : Optional[int] )-> List[Any]: lowerCamelCase__ : str =self.tokenizer._build_translation_inputs( '''A test''', return_tensors='''pt''', src_lang='''en_XX''', tgt_lang='''ar_AR''' ) self.assertEqual( nested_simplify(lowerCamelCase ), { # A, test, EOS, en_XX '''input_ids''': [[62, 3034, 2, 25_0004]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 25_0001, }, )
625
1
"""simple docstring"""
import gc
import unittest

import numpy as np
import torch

from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


# Force deterministic kernels so the pixel-slice comparisons below are stable.
enable_full_determinism()


@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''simple docstring'''

    # GPU integration tests for StableDiffusionKDiffusionPipeline: each test
    # runs a short sampling loop with a fixed seed and compares a 3x3 corner
    # slice of the generated image against reference values.
    # NOTE(review): all four methods share the name `snake_case`, so only the
    # last definition survives on the class, and several locals are assigned
    # to `lowerCamelCase__` but read under other names (`sd_pipe`, `prompt`,
    # `output`, `image`, ...) — artifacts of mechanical renaming, left
    # byte-identical here; only comments were added.

    def snake_case ( self : Optional[Any] )-> Optional[int]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def snake_case ( self : Tuple )-> Optional[int]:
        # SD v1-4 with the k-diffusion "sample_euler" scheduler, 20 steps.
        lowerCamelCase__ : Optional[Any] =StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
        lowerCamelCase__ : List[str] =sd_pipe.to(lowerCamelCase )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
        sd_pipe.set_scheduler('''sample_euler''' )
        lowerCamelCase__ : List[str] ='''A painting of a squirrel eating a burger'''
        # Fixed seed so the output slice is reproducible.
        lowerCamelCase__ : Optional[int] =torch.manual_seed(0 )
        lowerCamelCase__ : Union[str, Any] =sd_pipe([prompt], generator=lowerCamelCase, guidance_scale=9.0, num_inference_steps=20, output_type='''np''' )
        lowerCamelCase__ : int =output.images
        # Bottom-right 3x3 patch of the last channel.
        lowerCamelCase__ : List[Any] =image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        lowerCamelCase__ : str =np.array([0.0_447, 0.0_492, 0.0_468, 0.0_408, 0.0_383, 0.0_408, 0.0_354, 0.0_380, 0.0_339] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def snake_case ( self : List[str] )-> Tuple:
        # SD v2-1 base with "sample_euler"; note the much looser tolerance (5e-1).
        lowerCamelCase__ : int =StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        lowerCamelCase__ : str =sd_pipe.to(lowerCamelCase )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
        sd_pipe.set_scheduler('''sample_euler''' )
        lowerCamelCase__ : str ='''A painting of a squirrel eating a burger'''
        lowerCamelCase__ : List[str] =torch.manual_seed(0 )
        lowerCamelCase__ : Union[str, Any] =sd_pipe([prompt], generator=lowerCamelCase, guidance_scale=9.0, num_inference_steps=20, output_type='''np''' )
        lowerCamelCase__ : Union[str, Any] =output.images
        lowerCamelCase__ : str =image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        lowerCamelCase__ : List[Any] =np.array([0.1_237, 0.1_320, 0.1_438, 0.1_359, 0.1_390, 0.1_132, 0.1_277, 0.1_175, 0.1_112] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1

    def snake_case ( self : Any )-> int:
        # SD v2-1 base with the DPM++ 2M scheduler and Karras sigma schedule,
        # 15 steps at guidance scale 7.5.
        lowerCamelCase__ : Optional[int] =StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        lowerCamelCase__ : Tuple =sd_pipe.to(lowerCamelCase )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
        sd_pipe.set_scheduler('''sample_dpmpp_2m''' )
        lowerCamelCase__ : List[Any] ='''A painting of a squirrel eating a burger'''
        lowerCamelCase__ : Any =torch.manual_seed(0 )
        lowerCamelCase__ : Any =sd_pipe(
            [prompt], generator=lowerCamelCase, guidance_scale=7.5, num_inference_steps=15, output_type='''np''', use_karras_sigmas=lowerCamelCase, )
        lowerCamelCase__ : List[str] =output.images
        lowerCamelCase__ : Optional[int] =image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        lowerCamelCase__ : Tuple =np.array(
            [0.11_381_689, 0.12_112_921, 0.1_389_457, 0.12_549_606, 0.1_244_964, 0.10_831_517, 0.11_562_866, 0.10_867_816, 0.10_499_048] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
625
"""simple docstring""" def snake_case__ ( __lowerCamelCase : str ): """simple docstring""" return " ".join( ''''''.join(word[::-1] ) if len(__lowerCamelCase ) > 4 else word for word in sentence.split() ) if __name__ == "__main__": import doctest doctest.testmod() print(reverse_long_words("Hey wollef sroirraw"))
625
1
"""simple docstring""" import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : List[Any], lowerCamelCase : List[Any], lowerCamelCase : Dict=3, lowerCamelCase : Optional[Any]=32, lowerCamelCase : Tuple=3, lowerCamelCase : Optional[Any]=10, lowerCamelCase : Dict=[8, 16, 32, 64], lowerCamelCase : Dict=[1, 1, 2, 1], lowerCamelCase : Tuple=True, lowerCamelCase : Any=True, lowerCamelCase : Any="relu", lowerCamelCase : List[str]=3, lowerCamelCase : Union[str, Any]=None, lowerCamelCase : Optional[int]=["stage2", "stage3", "stage4"], lowerCamelCase : List[str]=[2, 3, 4], lowerCamelCase : Union[str, Any]=1, )-> Any: lowerCamelCase__ : str =parent lowerCamelCase__ : Union[str, Any] =batch_size lowerCamelCase__ : List[Any] =image_size lowerCamelCase__ : List[str] =num_channels lowerCamelCase__ : Dict =embeddings_size lowerCamelCase__ : Optional[int] =hidden_sizes lowerCamelCase__ : Optional[Any] =depths lowerCamelCase__ : List[Any] =is_training lowerCamelCase__ : List[str] =use_labels lowerCamelCase__ : Tuple =hidden_act lowerCamelCase__ : Any =num_labels lowerCamelCase__ : Tuple =scope lowerCamelCase__ : List[str] =len(lowerCamelCase ) lowerCamelCase__ : Union[str, Any] =out_features lowerCamelCase__ 
: Union[str, Any] =out_indices lowerCamelCase__ : List[str] =num_groups def snake_case ( self : str )-> Tuple: lowerCamelCase__ : str =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ : Any =None if self.use_labels: lowerCamelCase__ : List[Any] =ids_tensor([self.batch_size], self.num_labels ) lowerCamelCase__ : Dict =self.get_config() return config, pixel_values, labels def snake_case ( self : Union[str, Any] )-> Dict: return BitConfig( num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, out_features=self.out_features, out_indices=self.out_indices, num_groups=self.num_groups, ) def snake_case ( self : Optional[Any], lowerCamelCase : str, lowerCamelCase : int, lowerCamelCase : Optional[Any] )-> List[Any]: lowerCamelCase__ : Dict =BitModel(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() lowerCamelCase__ : Dict =model(lowerCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), ) def snake_case ( self : str, lowerCamelCase : Optional[Any], lowerCamelCase : Union[str, Any], lowerCamelCase : List[str] )-> str: lowerCamelCase__ : Dict =self.num_labels lowerCamelCase__ : List[str] =BitForImageClassification(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() lowerCamelCase__ : Optional[Any] =model(lowerCamelCase, labels=lowerCamelCase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def snake_case ( self : Any, lowerCamelCase : List[Any], lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[int] )-> Optional[Any]: lowerCamelCase__ : Union[str, Any] =BitBackbone(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() lowerCamelCase__ : str =model(lowerCamelCase ) # verify feature maps 
self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ), len(config.out_features ) ) self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] ) # verify backbone works with out_features=None lowerCamelCase__ : Dict =None lowerCamelCase__ : Optional[int] =BitBackbone(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() lowerCamelCase__ : Tuple =model(lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ), 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ), 1 ) self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] ) def snake_case ( self : Any )-> List[Any]: lowerCamelCase__ : Dict =self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =config_and_inputs lowerCamelCase__ : str ={'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' _a = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () _a = ( {'feature-extraction': BitModel, 'image-classification': BitForImageClassification} if is_torch_available() else {} ) _a = False _a = False _a = False _a = False _a = False def snake_case ( self : Union[str, Any] )-> Union[str, Any]: lowerCamelCase__ : Dict =BitModelTester(self ) lowerCamelCase__ : int =ConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase ) def snake_case ( self : Optional[Any] )-> str: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() 
self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def snake_case ( self : List[Any] )-> List[Any]: return @unittest.skip(reason='''Bit does not output attentions''' ) def snake_case ( self : Optional[Any] )-> List[Any]: pass @unittest.skip(reason='''Bit does not use inputs_embeds''' ) def snake_case ( self : Tuple )-> Optional[int]: pass @unittest.skip(reason='''Bit does not support input and output embeddings''' ) def snake_case ( self : Any )-> str: pass def snake_case ( self : Union[str, Any] )-> Any: lowerCamelCase__ , lowerCamelCase__ : List[str] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Dict =model_class(lowerCamelCase ) lowerCamelCase__ : Dict =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ : Optional[Any] =[*signature.parameters.keys()] lowerCamelCase__ : Dict =['''pixel_values'''] self.assertListEqual(arg_names[:1], lowerCamelCase ) def snake_case ( self : int )-> Optional[Any]: lowerCamelCase__ : Any =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase ) def snake_case ( self : Dict )-> List[str]: lowerCamelCase__ : Optional[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*lowerCamelCase ) def snake_case ( self : Optional[int] )-> Optional[Any]: lowerCamelCase__ , lowerCamelCase__ : str =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : List[str] =model_class(config=lowerCamelCase ) for name, module in model.named_modules(): if isinstance(lowerCamelCase, (nn.BatchNormad, nn.GroupNorm) ): 
self.assertTrue( torch.all(module.weight == 1 ), msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', ) self.assertTrue( torch.all(module.bias == 0 ), msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', ) def snake_case ( self : List[str] )-> List[str]: def check_hidden_states_output(lowerCamelCase : Optional[int], lowerCamelCase : List[str], lowerCamelCase : Optional[Any] ): lowerCamelCase__ : Tuple =model_class(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() with torch.no_grad(): lowerCamelCase__ : Union[str, Any] =model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) ) lowerCamelCase__ : Optional[int] =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCamelCase__ : str =self.model_tester.num_stages self.assertEqual(len(lowerCamelCase ), expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], ) lowerCamelCase__ , lowerCamelCase__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ : int =['''preactivation''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: lowerCamelCase__ : List[Any] =layer_type lowerCamelCase__ : Optional[int] =True check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase__ : Optional[int] =True check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase ) @unittest.skip(reason='''Bit does not use feedforward chunking''' ) def snake_case ( self : List[Any] )-> int: pass def snake_case ( self : Optional[int] )-> List[str]: lowerCamelCase__ : Any =self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase ) @slow def snake_case ( self : Any )-> int: for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : str =BitModel.from_pretrained(lowerCamelCase ) self.assertIsNotNone(lowerCamelCase ) def snake_case__ ( ): """simple docstring""" lowerCamelCase__ : int =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @cached_property def snake_case ( self : Tuple )-> Any: return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def snake_case ( self : Dict )-> List[str]: lowerCamelCase__ : Optional[Any] =BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCamelCase ) lowerCamelCase__ : Union[str, Any] =self.default_image_processor lowerCamelCase__ : Dict =prepare_img() lowerCamelCase__ : Union[str, Any] =image_processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase ) # forward pass with torch.no_grad(): lowerCamelCase__ : List[Any] =model(**lowerCamelCase ) # verify the logits lowerCamelCase__ : str =torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape, lowerCamelCase ) lowerCamelCase__ : Dict =torch.tensor([[-0.6_526, -0.5_263, -1.4_398]] ).to(lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3], lowerCamelCase, atol=1E-4 ) ) @require_torch class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' _a = (BitBackbone,) if is_torch_available() else () _a = BitConfig _a = False def snake_case ( self : Optional[Any] )-> List[Any]: lowerCamelCase__ : List[str] =BitModelTester(self )
625
"""simple docstring""" def snake_case__ ( __lowerCamelCase : int = 10 , __lowerCamelCase : int = 22 ): """simple docstring""" lowerCamelCase__ : Optional[Any] =range(1 , __lowerCamelCase ) lowerCamelCase__ : str =range(1 , __lowerCamelCase ) return sum( 1 for power in powers for base in bases if len(str(base**power ) ) == power ) if __name__ == "__main__": print(f'{solution(1_0, 2_2) = }')
625
1
"""simple docstring""" from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, ) @flax.struct.dataclass class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' _a = 42 _a = 42 class __SCREAMING_SNAKE_CASE ( nn.Module ): '''simple docstring''' _a = 42 _a = (1_6, 3_2, 9_6, 2_5_6) _a = jnp.floataa def snake_case ( self : Tuple )-> int: lowerCamelCase__ : Tuple =nn.Conv( self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, ) lowerCamelCase__ : Dict =[] for i in range(len(self.block_out_channels ) - 1 ): lowerCamelCase__ : Dict =self.block_out_channels[i] lowerCamelCase__ : Dict =self.block_out_channels[i + 1] lowerCamelCase__ : List[str] =nn.Conv( lowerCamelCase, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, ) blocks.append(lowerCamelCase ) lowerCamelCase__ : Optional[int] =nn.Conv( lowerCamelCase, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, ) blocks.append(lowerCamelCase ) lowerCamelCase__ : Any =blocks lowerCamelCase__ : Optional[int] =nn.Conv( self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) def __call__( self : Any, lowerCamelCase : int )-> List[str]: lowerCamelCase__ : Tuple =self.conv_in(lowerCamelCase ) lowerCamelCase__ : Dict =nn.silu(lowerCamelCase ) for block in self.blocks: lowerCamelCase__ : str =block(lowerCamelCase ) lowerCamelCase__ : List[str] =nn.silu(lowerCamelCase ) lowerCamelCase__ : Any 
=self.conv_out(lowerCamelCase ) return embedding @flax_register_to_config class __SCREAMING_SNAKE_CASE ( nn.Module , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' _a = 3_2 _a = 4 _a = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) _a = False _a = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0) _a = 2 _a = 8 _a = None _a = 1_2_8_0 _a = 0.0 _a = False _a = jnp.floataa _a = True _a = 0 _a = "rgb" _a = (1_6, 3_2, 9_6, 2_5_6) def snake_case ( self : str, lowerCamelCase : jax.random.KeyArray )-> FrozenDict: # init input tensors lowerCamelCase__ : int =(1, self.in_channels, self.sample_size, self.sample_size) lowerCamelCase__ : int =jnp.zeros(lowerCamelCase, dtype=jnp.floataa ) lowerCamelCase__ : Union[str, Any] =jnp.ones((1,), dtype=jnp.intaa ) lowerCamelCase__ : str =jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.floataa ) lowerCamelCase__ : Any =(1, 3, self.sample_size * 8, self.sample_size * 8) lowerCamelCase__ : Optional[Any] =jnp.zeros(lowerCamelCase, dtype=jnp.floataa ) lowerCamelCase__ , lowerCamelCase__ : List[Any] =jax.random.split(lowerCamelCase ) lowerCamelCase__ : Dict ={'''params''': params_rng, '''dropout''': dropout_rng} return self.init(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )["params"] def snake_case ( self : Any )-> Tuple: lowerCamelCase__ : Optional[int] =self.block_out_channels lowerCamelCase__ : Tuple =block_out_channels[0] * 4 # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. 
The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. lowerCamelCase__ : List[Any] =self.num_attention_heads or self.attention_head_dim # input lowerCamelCase__ : int =nn.Conv( block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, ) # time lowerCamelCase__ : str =FlaxTimesteps( block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift ) lowerCamelCase__ : Dict =FlaxTimestepEmbedding(lowerCamelCase, dtype=self.dtype ) lowerCamelCase__ : List[Any] =FlaxControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels, ) lowerCamelCase__ : Dict =self.only_cross_attention if isinstance(lowerCamelCase, lowerCamelCase ): lowerCamelCase__ : int =(only_cross_attention,) * len(self.down_block_types ) if isinstance(lowerCamelCase, lowerCamelCase ): lowerCamelCase__ : List[str] =(num_attention_heads,) * len(self.down_block_types ) # down lowerCamelCase__ : Union[str, Any] =[] lowerCamelCase__ : Dict =[] lowerCamelCase__ : List[Any] =block_out_channels[0] lowerCamelCase__ : List[Any] =nn.Conv( lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) controlnet_down_blocks.append(lowerCamelCase ) for i, down_block_type in enumerate(self.down_block_types ): lowerCamelCase__ : List[Any] =output_channel lowerCamelCase__ : str =block_out_channels[i] lowerCamelCase__ : Dict =i == len(lowerCamelCase ) - 1 if down_block_type == "CrossAttnDownBlock2D": lowerCamelCase__ : str =FlaxCrossAttnDownBlockaD( in_channels=lowerCamelCase, out_channels=lowerCamelCase, dropout=self.dropout, 
num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype, ) else: lowerCamelCase__ : List[Any] =FlaxDownBlockaD( in_channels=lowerCamelCase, out_channels=lowerCamelCase, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, ) down_blocks.append(lowerCamelCase ) for _ in range(self.layers_per_block ): lowerCamelCase__ : Any =nn.Conv( lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) controlnet_down_blocks.append(lowerCamelCase ) if not is_final_block: lowerCamelCase__ : Any =nn.Conv( lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) controlnet_down_blocks.append(lowerCamelCase ) lowerCamelCase__ : int =down_blocks lowerCamelCase__ : List[str] =controlnet_down_blocks # mid lowerCamelCase__ : Tuple =block_out_channels[-1] lowerCamelCase__ : List[Any] =FlaxUNetMidBlockaDCrossAttn( in_channels=lowerCamelCase, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype, ) lowerCamelCase__ : List[str] =nn.Conv( lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) def __call__( self : int, lowerCamelCase : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : str, lowerCamelCase : float = 1.0, lowerCamelCase : bool = True, lowerCamelCase : bool = False, )-> Union[FlaxControlNetOutput, Tuple]: lowerCamelCase__ : int =self.controlnet_conditioning_channel_order if channel_order == "bgr": lowerCamelCase__ : int 
=jnp.flip(lowerCamelCase, axis=1 ) # 1. time if not isinstance(lowerCamelCase, jnp.ndarray ): lowerCamelCase__ : Any =jnp.array([timesteps], dtype=jnp.intaa ) elif isinstance(lowerCamelCase, jnp.ndarray ) and len(timesteps.shape ) == 0: lowerCamelCase__ : List[str] =timesteps.astype(dtype=jnp.floataa ) lowerCamelCase__ : int =jnp.expand_dims(lowerCamelCase, 0 ) lowerCamelCase__ : Optional[Any] =self.time_proj(lowerCamelCase ) lowerCamelCase__ : Optional[Any] =self.time_embedding(lowerCamelCase ) # 2. pre-process lowerCamelCase__ : Optional[int] =jnp.transpose(lowerCamelCase, (0, 2, 3, 1) ) lowerCamelCase__ : Dict =self.conv_in(lowerCamelCase ) lowerCamelCase__ : List[str] =jnp.transpose(lowerCamelCase, (0, 2, 3, 1) ) lowerCamelCase__ : int =self.controlnet_cond_embedding(lowerCamelCase ) sample += controlnet_cond # 3. down lowerCamelCase__ : Union[str, Any] =(sample,) for down_block in self.down_blocks: if isinstance(lowerCamelCase, lowerCamelCase ): lowerCamelCase__ , lowerCamelCase__ : Dict =down_block(lowerCamelCase, lowerCamelCase, lowerCamelCase, deterministic=not train ) else: lowerCamelCase__ , lowerCamelCase__ : Tuple =down_block(lowerCamelCase, lowerCamelCase, deterministic=not train ) down_block_res_samples += res_samples # 4. mid lowerCamelCase__ : Optional[int] =self.mid_block(lowerCamelCase, lowerCamelCase, lowerCamelCase, deterministic=not train ) # 5. contronet blocks lowerCamelCase__ : Optional[Any] =() for down_block_res_sample, controlnet_block in zip(lowerCamelCase, self.controlnet_down_blocks ): lowerCamelCase__ : Union[str, Any] =controlnet_block(lowerCamelCase ) controlnet_down_block_res_samples += (down_block_res_sample,) lowerCamelCase__ : List[str] =controlnet_down_block_res_samples lowerCamelCase__ : List[str] =self.controlnet_mid_block(lowerCamelCase ) # 6. 
scaling lowerCamelCase__ : Union[str, Any] =[sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample *= conditioning_scale if not return_dict: return (down_block_res_samples, mid_block_res_sample) return FlaxControlNetOutput( down_block_res_samples=lowerCamelCase, mid_block_res_sample=lowerCamelCase )
625
"""simple docstring""" import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def snake_case__ ( __lowerCamelCase : List[Any] ): """simple docstring""" if isinstance(__lowerCamelCase , collections.abc.Iterable ): return x return (x, x) @require_flax class __SCREAMING_SNAKE_CASE : '''simple docstring''' def snake_case ( self : Dict, lowerCamelCase : List[str], lowerCamelCase : Any )-> Union[str, Any]: pass def snake_case ( self : List[str] )-> List[str]: pass def snake_case ( self : Optional[Any] )-> str: pass def snake_case ( self : Union[str, Any], lowerCamelCase : np.ndarray, lowerCamelCase : np.ndarray, lowerCamelCase : float )-> Dict: lowerCamelCase__ : Union[str, Any] =np.abs((a - b) ).max() self.assertLessEqual(lowerCamelCase, lowerCamelCase, F'''Difference between torch and flax is {diff} (>= {tol}).''' ) def snake_case ( self : Dict, lowerCamelCase : Tuple, lowerCamelCase : Any, lowerCamelCase : List[str], lowerCamelCase : Dict, lowerCamelCase : Any=None, **lowerCamelCase : str )-> int: 
lowerCamelCase__ : List[str] =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : Tuple =FlaxVisionTextDualEncoderModel(lowerCamelCase ) lowerCamelCase__ : Dict =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase ) self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], config.projection_dim) ) def snake_case ( self : Any, lowerCamelCase : int, lowerCamelCase : Optional[Any], lowerCamelCase : List[str], lowerCamelCase : Union[str, Any], lowerCamelCase : str=None, **lowerCamelCase : List[Any] )-> int: lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : Union[str, Any] ={'''vision_model''': vision_model, '''text_model''': text_model} lowerCamelCase__ : Tuple =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase ) lowerCamelCase__ : Optional[int] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase ) self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], model.config.projection_dim) ) def snake_case ( self : Any, lowerCamelCase : Dict, lowerCamelCase : Dict, lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Dict=None, **lowerCamelCase : int )-> List[str]: lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : Optional[int] ={'''vision_model''': vision_model, '''text_model''': text_model} lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase ) lowerCamelCase__ : List[Any] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, 
attention_mask=lowerCamelCase ) lowerCamelCase__ : int =output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCamelCase ) lowerCamelCase__ : Dict =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase ) lowerCamelCase__ : Optional[int] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase ) lowerCamelCase__ : List[str] =after_output[0] lowerCamelCase__ : Any =np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCamelCase, 1E-3 ) def snake_case ( self : Optional[Any], lowerCamelCase : Dict, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : List[Any]=None, **lowerCamelCase : List[Any] )-> Tuple: lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : Any ={'''vision_model''': vision_model, '''text_model''': text_model} lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase ) lowerCamelCase__ : List[str] =model( input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase, output_attentions=lowerCamelCase ) lowerCamelCase__ : int =output.vision_model_output.attentions self.assertEqual(len(lowerCamelCase ), vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) lowerCamelCase__ : Tuple =to_atuple(vision_model.config.image_size ) lowerCamelCase__ : Optional[Any] =to_atuple(vision_model.config.patch_size ) lowerCamelCase__ : Union[str, Any] =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) lowerCamelCase__ : int =num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len) ) lowerCamelCase__ : List[Any] =output.text_model_output.attentions self.assertEqual(len(lowerCamelCase ), text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:], 
(text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), ) def snake_case ( self : Tuple, lowerCamelCase : Optional[int], lowerCamelCase : Any, lowerCamelCase : Union[str, Any] )-> Any: pt_model.to(lowerCamelCase ) pt_model.eval() # prepare inputs lowerCamelCase__ : Any =inputs_dict lowerCamelCase__ : Any ={k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): lowerCamelCase__ : List[str] =pt_model(**lowerCamelCase ).to_tuple() lowerCamelCase__ : Optional[Any] =fx_model(**lowerCamelCase ).to_tuple() self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4] ): self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowerCamelCase ) lowerCamelCase__ : Optional[int] =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_pt=lowerCamelCase ) lowerCamelCase__ : List[Any] =fx_model_loaded(**lowerCamelCase ).to_tuple() self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4] ): self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowerCamelCase ) lowerCamelCase__ : str =VisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_flax=lowerCamelCase ) pt_model_loaded.to(lowerCamelCase ) pt_model_loaded.eval() with torch.no_grad(): lowerCamelCase__ : List[Any] =pt_model_loaded(**lowerCamelCase ).to_tuple() self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4] ): self.assert_almost_equals(lowerCamelCase, 
pt_output_loaded.numpy(), 4E-2 ) def snake_case ( self : str, lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[Any], lowerCamelCase : str )-> List[Any]: lowerCamelCase__ : Any =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : List[Any] =VisionTextDualEncoderModel(lowerCamelCase ) lowerCamelCase__ : List[str] =FlaxVisionTextDualEncoderModel(lowerCamelCase ) lowerCamelCase__ : str =convert_pytorch_state_dict_to_flax(pt_model.state_dict(), lowerCamelCase ) lowerCamelCase__ : Tuple =fx_state self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase ) def snake_case ( self : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Union[str, Any] )-> Optional[int]: lowerCamelCase__ : Dict =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : Tuple =VisionTextDualEncoderModel(lowerCamelCase ) lowerCamelCase__ : List[str] =FlaxVisionTextDualEncoderModel(lowerCamelCase ) lowerCamelCase__ : Tuple =load_flax_weights_in_pytorch_model(lowerCamelCase, fx_model.params ) self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase ) def snake_case ( self : Optional[int] )-> Union[str, Any]: lowerCamelCase__ : Any =self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**lowerCamelCase ) def snake_case ( self : Tuple )-> int: lowerCamelCase__ : int =self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**lowerCamelCase ) def snake_case ( self : Tuple )-> Any: lowerCamelCase__ : Tuple =self.prepare_config_and_inputs() self.check_save_load(**lowerCamelCase ) def snake_case ( self : str )-> Any: lowerCamelCase__ : str =self.prepare_config_and_inputs() self.check_vision_text_output_attention(**lowerCamelCase ) @is_pt_flax_cross_test def snake_case ( self : Tuple )-> List[Any]: lowerCamelCase__ : Union[str, Any] =self.prepare_config_and_inputs() 
lowerCamelCase__ : Union[str, Any] =config_inputs_dict.pop('''vision_config''' ) lowerCamelCase__ : Optional[Any] =config_inputs_dict.pop('''text_config''' ) lowerCamelCase__ : Tuple =config_inputs_dict self.check_equivalence_pt_to_flax(lowerCamelCase, lowerCamelCase, lowerCamelCase ) self.check_equivalence_flax_to_pt(lowerCamelCase, lowerCamelCase, lowerCamelCase ) @slow def snake_case ( self : Optional[Any] )-> Tuple: lowerCamelCase__ , lowerCamelCase__ : Dict =self.get_pretrained_model_and_inputs() lowerCamelCase__ : Optional[int] =model_a(**lowerCamelCase ) lowerCamelCase__ : List[str] =outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(lowerCamelCase ) lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase ) lowerCamelCase__ : Union[str, Any] =model_a(**lowerCamelCase ) lowerCamelCase__ : List[Any] =after_outputs[0] lowerCamelCase__ : Any =np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCamelCase, 1E-5 ) @require_flax class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' def snake_case ( self : Optional[int] )-> Optional[Any]: lowerCamelCase__ : str =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( '''hf-internal-testing/tiny-random-vit''', '''hf-internal-testing/tiny-bert''', vision_from_pt=lowerCamelCase, text_from_pt=lowerCamelCase, ) lowerCamelCase__ : Union[str, Any] =13 lowerCamelCase__ : List[str] =floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) lowerCamelCase__ : List[str] =ids_tensor([batch_size, 4], model.config.text_config.vocab_size ) lowerCamelCase__ : Optional[int] =random_attention_mask([batch_size, 4] ) lowerCamelCase__ : Any ={'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def snake_case ( self : str, lowerCamelCase : str, lowerCamelCase : int 
)-> int: lowerCamelCase__ : str =FlaxViTModel(lowerCamelCase ) lowerCamelCase__ : Any =FlaxBertModel(lowerCamelCase ) return vision_model, text_model def snake_case ( self : int )-> Optional[int]: lowerCamelCase__ : Any =FlaxViTModelTester(self ) lowerCamelCase__ : Union[str, Any] =FlaxBertModelTester(self ) lowerCamelCase__ : Any =vit_model_tester.prepare_config_and_inputs() lowerCamelCase__ : Optional[Any] =bert_model_tester.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ : Any =vision_config_and_inputs lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple =text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' def snake_case ( self : Optional[int] )-> Optional[int]: lowerCamelCase__ : Union[str, Any] =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( '''hf-internal-testing/tiny-random-clip''', '''hf-internal-testing/tiny-bert''', vision_from_pt=lowerCamelCase, text_from_pt=lowerCamelCase, ) lowerCamelCase__ : Union[str, Any] =13 lowerCamelCase__ : Optional[Any] =floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) lowerCamelCase__ : Union[str, Any] =ids_tensor([batch_size, 4], model.config.text_config.vocab_size ) lowerCamelCase__ : str =random_attention_mask([batch_size, 4] ) lowerCamelCase__ : Optional[int] ={'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def snake_case ( self : List[str], lowerCamelCase : Any, lowerCamelCase : Dict )-> Dict: lowerCamelCase__ : str =FlaxCLIPVisionModel(lowerCamelCase ) lowerCamelCase__ : 
Optional[Any] =FlaxBertModel(lowerCamelCase ) return vision_model, text_model def snake_case ( self : Optional[int] )-> Optional[Any]: lowerCamelCase__ : List[Any] =FlaxCLIPVisionModelTester(self ) lowerCamelCase__ : List[Any] =FlaxBertModelTester(self ) lowerCamelCase__ : Any =clip_model_tester.prepare_config_and_inputs() lowerCamelCase__ : Optional[int] =bert_model_tester.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ : List[Any] =vision_config_and_inputs lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] =text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @slow def snake_case ( self : Tuple )-> Optional[Any]: lowerCamelCase__ : Any =FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''', logit_scale_init_value=1.0 ) lowerCamelCase__ : List[Any] =VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' ) lowerCamelCase__ : Optional[int] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCamelCase__ : Dict =processor( text=['''una foto di un gatto''', '''una foto di un cane'''], images=lowerCamelCase, padding=lowerCamelCase, return_tensors='''np''' ) lowerCamelCase__ : List[Any] =model(**lowerCamelCase ) # verify the logits self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), ) lowerCamelCase__ : Any =np.array([[1.2_284_727, 0.3_104_122]] ) self.assertTrue(np.allclose(outputs.logits_per_image, lowerCamelCase, atol=1E-3 ) )
625
1
"""Validate repository file paths.

Flags files whose paths contain uppercase letters, spaces, or hyphens, or
that live outside any directory, then exits with the number of offending
files as the process status so the script can serve as a CI gate.
"""
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

# Collect every tracked file path once; an empty result means the helper broke.
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

# NOTE(review): the obfuscated original bound every list to the same
# `_lowercase` name while the checks below read `upper_files`, `space_files`,
# `hyphen_files`, `nodir_files` and `bad_files`, raising NameError at runtime.
# The intended binding names are restored here; all printed strings are kept
# byte-identical.
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f'{len(upper_files)} files contain uppercase characters:')
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f'{len(space_files)} files contain space characters:')
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f'{len(hyphen_files)} files contain hyphen characters:')
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f'{len(nodir_files)} files are not in a directory:')
    print("\n".join(nodir_files) + "\n")

# Total count of offending files; a non-zero count becomes the exit status.
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
625
"""Recursive 0/1 knapsack solver."""


def snake_case__(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """Solve the 0/1 knapsack problem by plain recursion.

    NOTE(review): the obfuscated original gave all five parameters the same
    name ``__lowerCamelCase`` (a SyntaxError) while the body read
    ``weights``/``values``/... and recursed on an undefined ``knapsack``;
    coherent names are restored here.

    Args:
        weights: weight of each item.
        values: value of each item (parallel to ``weights``).
        number_of_items: total number of items considered.
        max_weight: remaining weight capacity.
        index: index of the item currently being decided.

    Returns:
        Maximum total value achievable from items ``index..number_of_items-1``
        without exceeding ``max_weight``.
    """
    # Base case: every item has been decided.
    if index == number_of_items:
        return 0
    # Choice 1: skip the current item.
    ans1 = snake_case__(weights, values, number_of_items, max_weight, index + 1)
    # Choice 2: take the current item, only if it still fits.
    ans2 = 0
    if weights[index] <= max_weight:
        ans2 = values[index] + snake_case__(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
625
1
"""Multi-process-aware logging utilities (accelerate style)."""
import logging
import os

from .state import PartialState


class MultiProcessAdapter(logging.LoggerAdapter):
    """``logging.LoggerAdapter`` aware of the distributed process layout.

    By default only the main process emits records; pass
    ``main_process_only=False`` to log on every process, and ``in_order=True``
    to have each process log one after the other.

    NOTE(review): the obfuscated original named both methods ``snake_case``
    (the second clobbered the first) while the body called
    ``self._should_log``, and the factory below returned the undefined name
    ``MultiProcessAdapter``; coherent names and the conventional
    ``True``/``False`` kwarg defaults are restored.
    """

    @staticmethod
    def _should_log(main_process_only):
        # Emit from every process unless the record is main-process-only.
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        """Delegate to the wrapped logger, filtered by process rank.

        ``main_process_only`` (default True) restricts emission to the main
        process; ``in_order`` (default False) makes each process log in rank
        order, synchronising between ranks.
        """
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def snake_case__(name: str, log_level: str = None):
    """Return a :class:`MultiProcessAdapter` wrapping ``logging.getLogger(name)``.

    ``log_level`` falls back to the ``ACCELERATE_LOG_LEVEL`` environment
    variable when not given; when resolved it is applied to both the named
    logger and the root logger.
    """
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
625
# Pinned pip requirement specifiers for the library's (optional) runtime and
# dev dependencies, keyed by bare distribution name; consumed by setup.py /
# dependency-version-check style tooling.
# NOTE(review): pure data table — entries and version pins are left
# byte-identical below (the line break falls inside the dict literal, which
# is legal inside braces).
"""simple docstring""" _lowercase : Optional[Any] = { "Pillow": "Pillow<10.0.0", "accelerate": "accelerate>=0.20.3", "av": "av==9.2.0", "beautifulsoup4": "beautifulsoup4", "black": "black~=23.1", "codecarbon": "codecarbon==1.2.0", "cookiecutter": "cookiecutter==1.7.3", "dataclasses": "dataclasses", "datasets": "datasets!=2.5.0", "decord": "decord==0.6.0", "deepspeed": "deepspeed>=0.9.3", "diffusers": "diffusers", "dill": "dill<0.3.5", "evaluate": "evaluate>=0.2.0", "fairscale": "fairscale>0.3", "faiss-cpu": "faiss-cpu", "fastapi": "fastapi", "filelock": "filelock", "flax": "flax>=0.4.1,<=0.7.0", "ftfy": "ftfy", "fugashi": "fugashi>=1.0", "GitPython": "GitPython<3.1.19", "hf-doc-builder": "hf-doc-builder>=0.3.0", "huggingface-hub": "huggingface-hub>=0.14.1,<1.0", "importlib_metadata": "importlib_metadata", "ipadic": "ipadic>=1.0.0,<2.0", "isort": "isort>=5.5.4", "jax": "jax>=0.2.8,!=0.3.2,<=0.4.13", "jaxlib": "jaxlib>=0.1.65,<=0.4.13", "jieba": "jieba", "kenlm": "kenlm", "keras-nlp": "keras-nlp>=0.3.1", "librosa": "librosa", "nltk": "nltk", "natten": "natten>=0.14.6", "numpy": "numpy>=1.17", "onnxconverter-common": "onnxconverter-common", "onnxruntime-tools": "onnxruntime-tools>=1.4.2", "onnxruntime": "onnxruntime>=1.4.0", "opencv-python": "opencv-python", "optuna": "optuna", "optax": "optax>=0.0.8,<=0.1.4", "packaging": "packaging>=20.0", "parameterized": "parameterized", "phonemizer": "phonemizer", "protobuf": "protobuf", "psutil": "psutil", "pyyaml": "pyyaml>=5.1", "pydantic": "pydantic<2", "pytest": "pytest>=7.2.0", "pytest-timeout": "pytest-timeout", "pytest-xdist": "pytest-xdist", "python": "python>=3.8.0", "ray[tune]": "ray[tune]", "regex": "regex!=2019.12.17", "requests": "requests", "rhoknp": "rhoknp>=1.1.0,<1.3.1", "rjieba": "rjieba", "rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1", "ruff": "ruff>=0.0.241,<=0.0.259", "sacrebleu": "sacrebleu>=1.4.12,<2.0.0", "sacremoses": "sacremoses", "safetensors": "safetensors>=0.3.1", "sagemaker": 
"sagemaker>=2.31.0", "scikit-learn": "scikit-learn", "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92", "sigopt": "sigopt", "starlette": "starlette", "sudachipy": "sudachipy>=0.6.6", "sudachidict_core": "sudachidict_core>=20220729", "tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14", "tensorflow": "tensorflow>=2.6,<2.14", "tensorflow-text": "tensorflow-text<2.14", "tf2onnx": "tf2onnx", "timeout-decorator": "timeout-decorator", "timm": "timm", "tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14", "torch": "torch>=1.9,!=1.12.0", "torchaudio": "torchaudio", "torchvision": "torchvision", "pyctcdecode": "pyctcdecode>=0.4.0", "tqdm": "tqdm>=4.27", "unidic": "unidic>=1.0.2", "unidic_lite": "unidic_lite>=1.0.7", "urllib3": "urllib3<2.0.0", "uvicorn": "uvicorn", }
625
1
# ---------------------------------------------------------------------------
# MobileNetV1 model tests (Hugging Face `transformers` test-suite style).
# Flattened onto the long lines below, this snippet contains:
#   * a ConfigTester subclass asserting `MobileNetVaConfig` exposes
#     `tf_padding` and `depth_multiplier`;
#   * `MobileNetVaModelTester`: builds a tiny config + random pixel inputs
#     and checks `MobileNetVaModel` output shape and
#     `MobileNetVaForImageClassification` logits shape;
#   * the `ModelTesterMixin`/`PipelineTesterMixin` test case: skips
#     inputs_embeds / output-embeddings / attention tests (the architecture
#     has none), checks the forward signature takes `pixel_values`, expects
#     26 hidden states, and round-trips `from_pretrained` under @slow;
#   * `prepare_img` plus a @slow integration test pinning the logits of
#     google/mobilenet_v1_1.0_224 (shape (1, 1001), atol 1e-4).
# NOTE(review): this is a flattened dump — line breaks below fall
# mid-statement, so the code is left byte-identical; only these header
# comments were added.
# ---------------------------------------------------------------------------
"""simple docstring""" import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' def snake_case ( self : Union[str, Any] )-> List[str]: lowerCamelCase__ : Dict =self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(lowerCamelCase, '''tf_padding''' ) ) self.parent.assertTrue(hasattr(lowerCamelCase, '''depth_multiplier''' ) ) class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Optional[Any], lowerCamelCase : Optional[int], lowerCamelCase : List[str]=13, lowerCamelCase : str=3, lowerCamelCase : List[str]=32, lowerCamelCase : Any=0.25, lowerCamelCase : Optional[int]=8, lowerCamelCase : str=True, lowerCamelCase : Tuple=1024, lowerCamelCase : Optional[int]=32, lowerCamelCase : Union[str, Any]="relu6", lowerCamelCase : Any=0.1, lowerCamelCase : Tuple=0.02, lowerCamelCase : Union[str, Any]=True, lowerCamelCase : Union[str, Any]=True, lowerCamelCase : Optional[int]=10, lowerCamelCase : int=None, )-> Any: lowerCamelCase__ : Any =parent lowerCamelCase__ : Optional[Any] =batch_size lowerCamelCase__ : Union[str, Any] =num_channels lowerCamelCase__ : str =image_size lowerCamelCase__ : Union[str, Any] =depth_multiplier lowerCamelCase__ : List[str] =min_depth lowerCamelCase__ : 
int =tf_padding lowerCamelCase__ : Optional[Any] =int(last_hidden_size * depth_multiplier ) lowerCamelCase__ : List[str] =output_stride lowerCamelCase__ : Optional[Any] =hidden_act lowerCamelCase__ : Tuple =classifier_dropout_prob lowerCamelCase__ : Any =use_labels lowerCamelCase__ : int =is_training lowerCamelCase__ : Union[str, Any] =num_labels lowerCamelCase__ : int =initializer_range lowerCamelCase__ : Optional[int] =scope def snake_case ( self : List[str] )-> Union[str, Any]: lowerCamelCase__ : str =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ : Any =None lowerCamelCase__ : Any =None if self.use_labels: lowerCamelCase__ : Dict =ids_tensor([self.batch_size], self.num_labels ) lowerCamelCase__ : Union[str, Any] =ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels ) lowerCamelCase__ : Dict =self.get_config() return config, pixel_values, labels, pixel_labels def snake_case ( self : int )-> List[Any]: return MobileNetVaConfig( num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, min_depth=self.min_depth, tf_padding=self.tf_padding, hidden_act=self.hidden_act, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, ) def snake_case ( self : Tuple, lowerCamelCase : Optional[int], lowerCamelCase : Tuple, lowerCamelCase : List[Any], lowerCamelCase : Optional[Any] )-> Tuple: lowerCamelCase__ : str =MobileNetVaModel(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() lowerCamelCase__ : Optional[int] =model(lowerCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape, ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def snake_case ( self : Tuple, lowerCamelCase : Optional[Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : int )-> int: lowerCamelCase__ : Optional[Any] 
=self.num_labels lowerCamelCase__ : Any =MobileNetVaForImageClassification(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() lowerCamelCase__ : Optional[Any] =model(lowerCamelCase, labels=lowerCamelCase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def snake_case ( self : Union[str, Any] )-> Optional[int]: lowerCamelCase__ : str =self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple =config_and_inputs lowerCamelCase__ : Any ={'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' _a = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else () _a = ( {'feature-extraction': MobileNetVaModel, 'image-classification': MobileNetVaForImageClassification} if is_torch_available() else {} ) _a = False _a = False _a = False _a = False def snake_case ( self : Any )-> Union[str, Any]: lowerCamelCase__ : str =MobileNetVaModelTester(self ) lowerCamelCase__ : int =MobileNetVaConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase ) def snake_case ( self : Optional[int] )-> Optional[int]: self.config_tester.run_common_tests() @unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' ) def snake_case ( self : Dict )-> Union[str, Any]: pass @unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' ) def snake_case ( self : Dict )-> str: pass @unittest.skip(reason='''MobileNetV1 does not output attentions''' ) def snake_case ( self : List[str] )-> Tuple: pass def snake_case ( self : Dict )-> Optional[Any]: lowerCamelCase__ , lowerCamelCase__ : int =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Optional[int] =model_class(lowerCamelCase ) lowerCamelCase__ : int =inspect.signature(model.forward ) 
# signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ : Optional[int] =[*signature.parameters.keys()] lowerCamelCase__ : Union[str, Any] =['''pixel_values'''] self.assertListEqual(arg_names[:1], lowerCamelCase ) def snake_case ( self : Optional[Any] )-> List[str]: lowerCamelCase__ : Any =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase ) def snake_case ( self : int )-> List[Any]: def check_hidden_states_output(lowerCamelCase : Dict, lowerCamelCase : Union[str, Any], lowerCamelCase : List[str] ): lowerCamelCase__ : int =model_class(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() with torch.no_grad(): lowerCamelCase__ : Optional[Any] =model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) ) lowerCamelCase__ : List[Any] =outputs.hidden_states lowerCamelCase__ : Any =26 self.assertEqual(len(lowerCamelCase ), lowerCamelCase ) lowerCamelCase__ , lowerCamelCase__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : int =True check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase__ : List[str] =True check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase ) def snake_case ( self : Any )-> List[Any]: lowerCamelCase__ : List[str] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase ) @slow def snake_case ( self : Optional[Any] )-> List[Any]: for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : Optional[int] =MobileNetVaModel.from_pretrained(lowerCamelCase ) self.assertIsNotNone(lowerCamelCase ) def snake_case__ ( ): """simple docstring""" lowerCamelCase__ : Optional[Any] 
=Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @cached_property def snake_case ( self : List[Any] )-> List[Any]: return ( MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None ) @slow def snake_case ( self : Optional[int] )-> Any: lowerCamelCase__ : Dict =MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(lowerCamelCase ) lowerCamelCase__ : Any =self.default_image_processor lowerCamelCase__ : Tuple =prepare_img() lowerCamelCase__ : List[str] =image_processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase ) # forward pass with torch.no_grad(): lowerCamelCase__ : str =model(**lowerCamelCase ) # verify the logits lowerCamelCase__ : str =torch.Size((1, 1001) ) self.assertEqual(outputs.logits.shape, lowerCamelCase ) lowerCamelCase__ : Dict =torch.tensor([-4.1_739, -1.1_233, 3.1_205] ).to(lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3], lowerCamelCase, atol=1E-4 ) )
625
"""Maximum product of a contiguous subarray (Kadane-style scan)."""


def snake_case__(numbers: list[int]) -> int:
    """Return the largest product of any contiguous subarray of ``numbers``.

    Tracks both the maximum and minimum product ending at each position — the
    minimum matters because multiplying it by a negative number can become the
    new maximum.

    Args:
        numbers: a list/tuple of integers; an empty input yields 0.

    Raises:
        ValueError: if ``numbers`` is not a list/tuple of integers.
    """
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError('''numbers must be an iterable of integers''')
    # NOTE(review): the obfuscated original collapsed this triple
    # initialisation into a single unbound assignment, leaving
    # `max_till_now`/`min_till_now`/`max_prod` undefined (NameError);
    # the intended initialisation is restored.
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # Update the maximum and minimum subarray products.
        number = numbers[i]
        if number < 0:
            # A negative factor swaps the roles of the running max and min.
            min_till_now, max_till_now = max_till_now, min_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # Update the maximum product found till now.
        max_prod = max(max_prod, max_till_now)
    return max_prod
625
1
# ---------------------------------------------------------------------------
# DETA (`deta`) model configuration (Hugging Face `transformers` style).
# `DetaConfig` composes: a nested backbone config (defaults to ResNet with
# out_features stage2..stage4 when none is given), encoder/decoder
# transformer hyper-parameters, deformable-attention settings
# (`two_stage=True` requires `with_box_refine=True` — enforced with a
# ValueError), Hungarian-matcher costs and loss coefficients.
# `num_attention_heads`/`hidden_size` are exposed as properties mapping to
# `encoder_attention_heads`/`d_model`, and `to_dict` serialises the nested
# backbone config plus the model_type.
# NOTE(review): flattened dump — a string literal spans the first line break
# below, so the code is left byte-identical; only these header comments
# were added.
# ---------------------------------------------------------------------------
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING _lowercase : List[Any] = logging.get_logger(__name__) _lowercase : List[str] = { "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json", } class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' _a = 'deta' _a = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : Dict, lowerCamelCase : Optional[Any]=None, lowerCamelCase : int=900, lowerCamelCase : Dict=2048, lowerCamelCase : Optional[Any]=6, lowerCamelCase : Any=2048, lowerCamelCase : Tuple=8, lowerCamelCase : Optional[int]=6, lowerCamelCase : Union[str, Any]=1024, lowerCamelCase : str=8, lowerCamelCase : int=0.0, lowerCamelCase : Union[str, Any]=True, lowerCamelCase : Tuple="relu", lowerCamelCase : Optional[Any]=256, lowerCamelCase : Union[str, Any]=0.1, lowerCamelCase : Union[str, Any]=0.0, lowerCamelCase : Optional[Any]=0.0, lowerCamelCase : Union[str, Any]=0.02, lowerCamelCase : List[str]=1.0, lowerCamelCase : Dict=True, lowerCamelCase : str=False, lowerCamelCase : Any="sine", lowerCamelCase : str=5, lowerCamelCase : str=4, lowerCamelCase : Dict=4, lowerCamelCase : Dict=True, lowerCamelCase : Dict=300, lowerCamelCase : List[str]=True, lowerCamelCase : Optional[Any]=True, lowerCamelCase : Optional[Any]=1, lowerCamelCase : Optional[int]=5, lowerCamelCase : Optional[int]=2, lowerCamelCase : Any=1, lowerCamelCase : Tuple=1, lowerCamelCase : Tuple=5, lowerCamelCase : Union[str, Any]=2, lowerCamelCase : Optional[int]=0.1, lowerCamelCase : List[Any]=0.25, **lowerCamelCase : int, )-> int: if backbone_config is None: logger.info('''`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.''' ) lowerCamelCase__ : Optional[int] =CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] ) else: if isinstance(lowerCamelCase, lowerCamelCase ): lowerCamelCase__ : int =backbone_config.pop('''model_type''' ) lowerCamelCase__ : List[Any] =CONFIG_MAPPING[backbone_model_type] lowerCamelCase__ : Optional[Any] =config_class.from_dict(lowerCamelCase ) lowerCamelCase__ : Tuple =backbone_config lowerCamelCase__ : List[Any] =num_queries lowerCamelCase__ : Optional[Any] =max_position_embeddings lowerCamelCase__ : Union[str, Any] =d_model lowerCamelCase__ : Tuple =encoder_ffn_dim lowerCamelCase__ : List[Any] =encoder_layers lowerCamelCase__ : Union[str, Any] =encoder_attention_heads lowerCamelCase__ : List[str] =decoder_ffn_dim lowerCamelCase__ : Tuple =decoder_layers lowerCamelCase__ : str =decoder_attention_heads lowerCamelCase__ : Any =dropout lowerCamelCase__ : str =attention_dropout lowerCamelCase__ : List[Any] =activation_dropout lowerCamelCase__ : Any =activation_function lowerCamelCase__ : Any =init_std lowerCamelCase__ : int =init_xavier_std lowerCamelCase__ : Optional[int] =encoder_layerdrop lowerCamelCase__ : Dict =auxiliary_loss lowerCamelCase__ : Any =position_embedding_type # deformable attributes lowerCamelCase__ : Dict =num_feature_levels lowerCamelCase__ : Dict =encoder_n_points lowerCamelCase__ : List[str] =decoder_n_points lowerCamelCase__ : Optional[int] =two_stage lowerCamelCase__ : Optional[Any] =two_stage_num_proposals lowerCamelCase__ : str =with_box_refine lowerCamelCase__ : Tuple =assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError('''If two_stage is True, with_box_refine must be True.''' ) # Hungarian matcher lowerCamelCase__ : Any =class_cost lowerCamelCase__ : Union[str, Any] =bbox_cost lowerCamelCase__ : Union[str, Any] =giou_cost # Loss coefficients lowerCamelCase__ : Tuple =mask_loss_coefficient lowerCamelCase__ : 
Optional[Any] =dice_loss_coefficient lowerCamelCase__ : Union[str, Any] =bbox_loss_coefficient lowerCamelCase__ : Tuple =giou_loss_coefficient lowerCamelCase__ : Optional[Any] =eos_coefficient lowerCamelCase__ : Dict =focal_alpha super().__init__(is_encoder_decoder=lowerCamelCase, **lowerCamelCase ) @property def snake_case ( self : Optional[int] )-> int: return self.encoder_attention_heads @property def snake_case ( self : Any )-> int: return self.d_model def snake_case ( self : Tuple )-> Dict: lowerCamelCase__ : Union[str, Any] =copy.deepcopy(self.__dict__ ) lowerCamelCase__ : Dict =self.backbone_config.to_dict() lowerCamelCase__ : Optional[int] =self.__class__.model_type return output
625
"""simple docstring""" from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, ) @flax.struct.dataclass class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' _a = 42 _a = 42 class __SCREAMING_SNAKE_CASE ( nn.Module ): '''simple docstring''' _a = 42 _a = (1_6, 3_2, 9_6, 2_5_6) _a = jnp.floataa def snake_case ( self : Tuple )-> int: lowerCamelCase__ : Tuple =nn.Conv( self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, ) lowerCamelCase__ : Dict =[] for i in range(len(self.block_out_channels ) - 1 ): lowerCamelCase__ : Dict =self.block_out_channels[i] lowerCamelCase__ : Dict =self.block_out_channels[i + 1] lowerCamelCase__ : List[str] =nn.Conv( lowerCamelCase, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, ) blocks.append(lowerCamelCase ) lowerCamelCase__ : Optional[int] =nn.Conv( lowerCamelCase, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, ) blocks.append(lowerCamelCase ) lowerCamelCase__ : Any =blocks lowerCamelCase__ : Optional[int] =nn.Conv( self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) def __call__( self : Any, lowerCamelCase : int )-> List[str]: lowerCamelCase__ : Tuple =self.conv_in(lowerCamelCase ) lowerCamelCase__ : Dict =nn.silu(lowerCamelCase ) for block in self.blocks: lowerCamelCase__ : str =block(lowerCamelCase ) lowerCamelCase__ : List[str] =nn.silu(lowerCamelCase ) lowerCamelCase__ : Any 
=self.conv_out(lowerCamelCase ) return embedding @flax_register_to_config class __SCREAMING_SNAKE_CASE ( nn.Module , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' _a = 3_2 _a = 4 _a = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) _a = False _a = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0) _a = 2 _a = 8 _a = None _a = 1_2_8_0 _a = 0.0 _a = False _a = jnp.floataa _a = True _a = 0 _a = "rgb" _a = (1_6, 3_2, 9_6, 2_5_6) def snake_case ( self : str, lowerCamelCase : jax.random.KeyArray )-> FrozenDict: # init input tensors lowerCamelCase__ : int =(1, self.in_channels, self.sample_size, self.sample_size) lowerCamelCase__ : int =jnp.zeros(lowerCamelCase, dtype=jnp.floataa ) lowerCamelCase__ : Union[str, Any] =jnp.ones((1,), dtype=jnp.intaa ) lowerCamelCase__ : str =jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.floataa ) lowerCamelCase__ : Any =(1, 3, self.sample_size * 8, self.sample_size * 8) lowerCamelCase__ : Optional[Any] =jnp.zeros(lowerCamelCase, dtype=jnp.floataa ) lowerCamelCase__ , lowerCamelCase__ : List[Any] =jax.random.split(lowerCamelCase ) lowerCamelCase__ : Dict ={'''params''': params_rng, '''dropout''': dropout_rng} return self.init(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )["params"] def snake_case ( self : Any )-> Tuple: lowerCamelCase__ : Optional[int] =self.block_out_channels lowerCamelCase__ : Tuple =block_out_channels[0] * 4 # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. 
The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. lowerCamelCase__ : List[Any] =self.num_attention_heads or self.attention_head_dim # input lowerCamelCase__ : int =nn.Conv( block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, ) # time lowerCamelCase__ : str =FlaxTimesteps( block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift ) lowerCamelCase__ : Dict =FlaxTimestepEmbedding(lowerCamelCase, dtype=self.dtype ) lowerCamelCase__ : List[Any] =FlaxControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels, ) lowerCamelCase__ : Dict =self.only_cross_attention if isinstance(lowerCamelCase, lowerCamelCase ): lowerCamelCase__ : int =(only_cross_attention,) * len(self.down_block_types ) if isinstance(lowerCamelCase, lowerCamelCase ): lowerCamelCase__ : List[str] =(num_attention_heads,) * len(self.down_block_types ) # down lowerCamelCase__ : Union[str, Any] =[] lowerCamelCase__ : Dict =[] lowerCamelCase__ : List[Any] =block_out_channels[0] lowerCamelCase__ : List[Any] =nn.Conv( lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) controlnet_down_blocks.append(lowerCamelCase ) for i, down_block_type in enumerate(self.down_block_types ): lowerCamelCase__ : List[Any] =output_channel lowerCamelCase__ : str =block_out_channels[i] lowerCamelCase__ : Dict =i == len(lowerCamelCase ) - 1 if down_block_type == "CrossAttnDownBlock2D": lowerCamelCase__ : str =FlaxCrossAttnDownBlockaD( in_channels=lowerCamelCase, out_channels=lowerCamelCase, dropout=self.dropout, 
num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype, ) else: lowerCamelCase__ : List[Any] =FlaxDownBlockaD( in_channels=lowerCamelCase, out_channels=lowerCamelCase, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, ) down_blocks.append(lowerCamelCase ) for _ in range(self.layers_per_block ): lowerCamelCase__ : Any =nn.Conv( lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) controlnet_down_blocks.append(lowerCamelCase ) if not is_final_block: lowerCamelCase__ : Any =nn.Conv( lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) controlnet_down_blocks.append(lowerCamelCase ) lowerCamelCase__ : int =down_blocks lowerCamelCase__ : List[str] =controlnet_down_blocks # mid lowerCamelCase__ : Tuple =block_out_channels[-1] lowerCamelCase__ : List[Any] =FlaxUNetMidBlockaDCrossAttn( in_channels=lowerCamelCase, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype, ) lowerCamelCase__ : List[str] =nn.Conv( lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) def __call__( self : int, lowerCamelCase : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : str, lowerCamelCase : float = 1.0, lowerCamelCase : bool = True, lowerCamelCase : bool = False, )-> Union[FlaxControlNetOutput, Tuple]: lowerCamelCase__ : int =self.controlnet_conditioning_channel_order if channel_order == "bgr": lowerCamelCase__ : int 
=jnp.flip(lowerCamelCase, axis=1 ) # 1. time if not isinstance(lowerCamelCase, jnp.ndarray ): lowerCamelCase__ : Any =jnp.array([timesteps], dtype=jnp.intaa ) elif isinstance(lowerCamelCase, jnp.ndarray ) and len(timesteps.shape ) == 0: lowerCamelCase__ : List[str] =timesteps.astype(dtype=jnp.floataa ) lowerCamelCase__ : int =jnp.expand_dims(lowerCamelCase, 0 ) lowerCamelCase__ : Optional[Any] =self.time_proj(lowerCamelCase ) lowerCamelCase__ : Optional[Any] =self.time_embedding(lowerCamelCase ) # 2. pre-process lowerCamelCase__ : Optional[int] =jnp.transpose(lowerCamelCase, (0, 2, 3, 1) ) lowerCamelCase__ : Dict =self.conv_in(lowerCamelCase ) lowerCamelCase__ : List[str] =jnp.transpose(lowerCamelCase, (0, 2, 3, 1) ) lowerCamelCase__ : int =self.controlnet_cond_embedding(lowerCamelCase ) sample += controlnet_cond # 3. down lowerCamelCase__ : Union[str, Any] =(sample,) for down_block in self.down_blocks: if isinstance(lowerCamelCase, lowerCamelCase ): lowerCamelCase__ , lowerCamelCase__ : Dict =down_block(lowerCamelCase, lowerCamelCase, lowerCamelCase, deterministic=not train ) else: lowerCamelCase__ , lowerCamelCase__ : Tuple =down_block(lowerCamelCase, lowerCamelCase, deterministic=not train ) down_block_res_samples += res_samples # 4. mid lowerCamelCase__ : Optional[int] =self.mid_block(lowerCamelCase, lowerCamelCase, lowerCamelCase, deterministic=not train ) # 5. contronet blocks lowerCamelCase__ : Optional[Any] =() for down_block_res_sample, controlnet_block in zip(lowerCamelCase, self.controlnet_down_blocks ): lowerCamelCase__ : Union[str, Any] =controlnet_block(lowerCamelCase ) controlnet_down_block_res_samples += (down_block_res_sample,) lowerCamelCase__ : List[str] =controlnet_down_block_res_samples lowerCamelCase__ : List[str] =self.controlnet_mid_block(lowerCamelCase ) # 6. 
scaling lowerCamelCase__ : Union[str, Any] =[sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample *= conditioning_scale if not return_dict: return (down_block_res_samples, mid_block_res_sample) return FlaxControlNetOutput( down_block_res_samples=lowerCamelCase, mid_block_res_sample=lowerCamelCase )
625
1
"""simple docstring""" def snake_case__ ( __lowerCamelCase : list , __lowerCamelCase : list , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int ): """simple docstring""" if index == number_of_items: return 0 lowerCamelCase__ : Optional[int] =0 lowerCamelCase__ : Union[str, Any] =0 lowerCamelCase__ : List[str] =knapsack(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , index + 1 ) if weights[index] <= max_weight: lowerCamelCase__ : Dict =values[index] + knapsack( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , max_weight - weights[index] , index + 1 ) return max(__lowerCamelCase , __lowerCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
625
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) _lowercase : Optional[Any] = { "configuration_clip": [ "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "CLIPConfig", "CLIPOnnxConfig", "CLIPTextConfig", "CLIPVisionConfig", ], "processing_clip": ["CLIPProcessor"], "tokenization_clip": ["CLIPTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : str = ["CLIPTokenizerFast"] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Any = ["CLIPFeatureExtractor"] _lowercase : int = ["CLIPImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Optional[Any] = [ "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "CLIPModel", "CLIPPreTrainedModel", "CLIPTextModel", "CLIPTextModelWithProjection", "CLIPVisionModel", "CLIPVisionModelWithProjection", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Dict = [ "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "TFCLIPModel", "TFCLIPPreTrainedModel", "TFCLIPTextModel", "TFCLIPVisionModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Union[str, Any] = [ "FlaxCLIPModel", "FlaxCLIPPreTrainedModel", "FlaxCLIPTextModel", "FlaxCLIPTextPreTrainedModel", "FlaxCLIPVisionModel", "FlaxCLIPVisionPreTrainedModel", ] if TYPE_CHECKING: from .configuration_clip import ( CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPConfig, CLIPOnnxConfig, CLIPTextConfig, CLIPVisionConfig, ) from .processing_clip import CLIPProcessor from .tokenization_clip import CLIPTokenizer 
try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_clip_fast import CLIPTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clip import CLIPFeatureExtractor from .image_processing_clip import CLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clip import ( CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPModel, CLIPPreTrainedModel, CLIPTextModel, CLIPTextModelWithProjection, CLIPVisionModel, CLIPVisionModelWithProjection, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_clip import ( TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFCLIPModel, TFCLIPPreTrainedModel, TFCLIPTextModel, TFCLIPVisionModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_clip import ( FlaxCLIPModel, FlaxCLIPPreTrainedModel, FlaxCLIPTextModel, FlaxCLIPTextPreTrainedModel, FlaxCLIPVisionModel, FlaxCLIPVisionPreTrainedModel, ) else: import sys _lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
625
1
"""simple docstring""" import timeit import numpy as np import datasets from datasets.arrow_writer import ArrowWriter from datasets.features.features import _ArrayXD def snake_case__ ( __lowerCamelCase : Optional[Any] ): """simple docstring""" def wrapper(*__lowerCamelCase : str , **__lowerCamelCase : Tuple ): lowerCamelCase__ : Union[str, Any] =timeit.default_timer() lowerCamelCase__ : int =func(*__lowerCamelCase , **__lowerCamelCase ) lowerCamelCase__ : str =timeit.default_timer() - starttime return delta lowerCamelCase__ : Dict =func.__name__ return wrapper def snake_case__ ( __lowerCamelCase : dict , __lowerCamelCase : List[Any]=100 , __lowerCamelCase : int=None ): """simple docstring""" lowerCamelCase__ : Any =[] lowerCamelCase__ : Optional[Any] =seq_shapes or {} for i in range(__lowerCamelCase ): lowerCamelCase__ : List[Any] ={} for col_id, (k, v) in enumerate(features.items() ): if isinstance(__lowerCamelCase , _ArrayXD ): lowerCamelCase__ : List[Any] =np.random.rand(*v.shape ).astype(v.dtype ) elif isinstance(__lowerCamelCase , datasets.Value ): if v.dtype == "string": lowerCamelCase__ : str ='''The small grey turtle was surprisingly fast when challenged.''' else: lowerCamelCase__ : Union[str, Any] =np.random.randint(10 , size=1 ).astype(v.dtype ).item() elif isinstance(__lowerCamelCase , datasets.Sequence ): while isinstance(__lowerCamelCase , datasets.Sequence ): lowerCamelCase__ : Union[str, Any] =v.feature lowerCamelCase__ : List[str] =seq_shapes[k] lowerCamelCase__ : Union[str, Any] =np.random.rand(*__lowerCamelCase ).astype(v.dtype ) lowerCamelCase__ : List[str] =data dummy_data.append((i, example) ) return dummy_data def snake_case__ ( __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str]=100 , __lowerCamelCase : int=None ): """simple docstring""" lowerCamelCase__ : Optional[int] =generate_examples(__lowerCamelCase , num_examples=__lowerCamelCase , seq_shapes=__lowerCamelCase ) with 
ArrowWriter(features=__lowerCamelCase , path=__lowerCamelCase ) as writer: for key, record in dummy_data: lowerCamelCase__ : List[Any] =features.encode_example(__lowerCamelCase ) writer.write(__lowerCamelCase ) lowerCamelCase__ , lowerCamelCase__ : str =writer.finalize() if not num_final_examples == num_examples: raise ValueError( f'''Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.''' ) lowerCamelCase__ : Tuple =datasets.Dataset.from_file(filename=__lowerCamelCase , info=datasets.DatasetInfo(features=__lowerCamelCase ) ) return dataset
625
"""simple docstring""" import os def snake_case__ ( ): """simple docstring""" with open(os.path.dirname(__lowerCamelCase ) + '''/p022_names.txt''' ) as file: lowerCamelCase__ : Tuple =str(file.readlines()[0] ) lowerCamelCase__ : int =names.replace('''"''' , '''''' ).split(''',''' ) names.sort() lowerCamelCase__ : Union[str, Any] =0 lowerCamelCase__ : str =0 for i, name in enumerate(__lowerCamelCase ): for letter in name: name_score += ord(__lowerCamelCase ) - 64 total_score += (i + 1) * name_score lowerCamelCase__ : Dict =0 return total_score if __name__ == "__main__": print(solution())
625
1
"""simple docstring""" def snake_case__ ( __lowerCamelCase : int = 10 , __lowerCamelCase : int = 22 ): """simple docstring""" lowerCamelCase__ : Optional[Any] =range(1 , __lowerCamelCase ) lowerCamelCase__ : str =range(1 , __lowerCamelCase ) return sum( 1 for power in powers for base in bases if len(str(base**power ) ) == power ) if __name__ == "__main__": print(f'{solution(1_0, 2_2) = }')
625
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : str, lowerCamelCase : int )-> None: lowerCamelCase__ : str =value lowerCamelCase__ : Node | None =None lowerCamelCase__ : Node | None =None class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : int, lowerCamelCase : Node )-> None: lowerCamelCase__ : Any =tree def snake_case ( self : str, lowerCamelCase : Node | None )-> int: if node is None: return 0 return node.value + ( self.depth_first_search(node.left ) + self.depth_first_search(node.right ) ) def __iter__( self : Dict )-> Iterator[int]: yield self.depth_first_search(self.tree ) if __name__ == "__main__": import doctest doctest.testmod()
625
1
"""simple docstring""" class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Dict, lowerCamelCase : int )-> None: lowerCamelCase__ : Tuple =size lowerCamelCase__ : Dict =[0] * size lowerCamelCase__ : int =[0] * size @staticmethod def snake_case ( lowerCamelCase : int )-> int: return index | (index + 1) @staticmethod def snake_case ( lowerCamelCase : int )-> int: return (index & (index + 1)) - 1 def snake_case ( self : Union[str, Any], lowerCamelCase : int, lowerCamelCase : int )-> None: lowerCamelCase__ : int =value while index < self.size: lowerCamelCase__ : Any =self.get_prev(lowerCamelCase ) + 1 if current_left_border == index: lowerCamelCase__ : int =value else: lowerCamelCase__ : List[Any] =max(lowerCamelCase, lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : Dict =self.get_next(lowerCamelCase ) def snake_case ( self : int, lowerCamelCase : int, lowerCamelCase : int )-> int: right -= 1 # Because of right is exclusive lowerCamelCase__ : Optional[Any] =0 while left <= right: lowerCamelCase__ : Tuple =self.get_prev(lowerCamelCase ) if left <= current_left: lowerCamelCase__ : Union[str, Any] =max(lowerCamelCase, self.tree[right] ) lowerCamelCase__ : Union[str, Any] =current_left else: lowerCamelCase__ : Tuple =max(lowerCamelCase, self.arr[right] ) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
625
"""simple docstring""" import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel _lowercase : List[str] = logging.getLogger(__name__) def snake_case__ ( __lowerCamelCase : Any , __lowerCamelCase : str ): """simple docstring""" # save results if os.path.exists(__lowerCamelCase ): if os.path.exists(os.path.join(__lowerCamelCase , '''config.json''' ) ) and os.path.isfile( os.path.join(__lowerCamelCase , '''config.json''' ) ): os.remove(os.path.join(__lowerCamelCase , '''config.json''' ) ) if os.path.exists(os.path.join(__lowerCamelCase , '''pytorch_model.bin''' ) ) and os.path.isfile( os.path.join(__lowerCamelCase , '''pytorch_model.bin''' ) ): os.remove(os.path.join(__lowerCamelCase , '''pytorch_model.bin''' ) ) else: os.makedirs(__lowerCamelCase ) model.save_pretrained(__lowerCamelCase ) def snake_case__ ( __lowerCamelCase : Tuple , __lowerCamelCase : Dict=False ): """simple docstring""" lowerCamelCase__ : Union[str, Any] =2 if unlogit: lowerCamelCase__ : Any =torch.pow(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : List[str] =p * torch.log(__lowerCamelCase ) lowerCamelCase__ : Tuple =0 return -plogp.sum(dim=-1 ) def snake_case__ ( __lowerCamelCase : Any ): """simple docstring""" logger.info('''lv, h >\t''' + '''\t'''.join(f'''{x + 1}''' for x in range(len(__lowerCamelCase ) ) ) ) for row in range(len(__lowerCamelCase ) ): if tensor.dtype != torch.long: logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) ) else: logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:d}''' for x in tensor[row].cpu().data ) ) def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : 
List[str]=None , __lowerCamelCase : Tuple=False ): """simple docstring""" lowerCamelCase__ , lowerCamelCase__ : Tuple =model.config.num_hidden_layers, model.config.num_attention_heads lowerCamelCase__ : Optional[Any] =torch.zeros(__lowerCamelCase , __lowerCamelCase ).to(args.device ) lowerCamelCase__ : Optional[Any] =torch.zeros(__lowerCamelCase , __lowerCamelCase ).to(args.device ) if head_mask is None: lowerCamelCase__ : List[Any] =torch.ones(__lowerCamelCase , __lowerCamelCase ).to(args.device ) head_mask.requires_grad_(requires_grad=__lowerCamelCase ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: lowerCamelCase__ : Union[str, Any] =None lowerCamelCase__ : List[str] =0.0 lowerCamelCase__ : Union[str, Any] =0.0 for step, inputs in enumerate(tqdm(__lowerCamelCase , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ): lowerCamelCase__ : Any =tuple(t.to(args.device ) for t in inputs ) ((lowerCamelCase__) , ) : Any =inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) lowerCamelCase__ : Dict =model(__lowerCamelCase , labels=__lowerCamelCase , head_mask=__lowerCamelCase ) # (loss), lm_logits, presents, (all hidden_states), (attentions) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple =( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(__lowerCamelCase ): lowerCamelCase__ : Any =entropy(attn.detach() , __lowerCamelCase ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(__lowerCamelCase ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise 
importance normalization if not args.dont_normalize_importance_by_layer: lowerCamelCase__ : int =2 lowerCamelCase__ : List[str] =torch.pow(torch.pow(__lowerCamelCase , __lowerCamelCase ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-2_0 if not args.dont_normalize_global_importance: lowerCamelCase__ : int =(head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info('''Attention entropies''' ) print_ad_tensor(__lowerCamelCase ) if compute_importance: logger.info('''Head importance scores''' ) print_ad_tensor(__lowerCamelCase ) logger.info('''Head ranked by importance scores''' ) lowerCamelCase__ : Optional[int] =torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) lowerCamelCase__ : Dict =torch.arange( head_importance.numel() , device=args.device ) lowerCamelCase__ : Any =head_ranks.view_as(__lowerCamelCase ) print_ad_tensor(__lowerCamelCase ) return attn_entropy, head_importance, total_loss def snake_case__ ( __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : int ): """simple docstring""" lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict =compute_heads_importance(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase ) lowerCamelCase__ : int =1 / loss # instead of downsteam score use the LM loss logger.info('''Pruning: original score: %f, threshold: %f''' , __lowerCamelCase , original_score * args.masking_threshold ) lowerCamelCase__ : Dict =torch.ones_like(__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] =max(1 , int(new_head_mask.numel() * args.masking_amount ) ) lowerCamelCase__ : List[Any] =original_score while current_score >= original_score * args.masking_threshold: lowerCamelCase__ : List[Any] =new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads lowerCamelCase__ : int 
=float('''Inf''' ) lowerCamelCase__ : Union[str, Any] =head_importance.view(-1 ).sort()[1] if len(__lowerCamelCase ) <= num_to_mask: print('''BREAK BY num_to_mask''' ) break # mask heads lowerCamelCase__ : List[str] =current_heads_to_mask[:num_to_mask] logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) ) lowerCamelCase__ : Optional[int] =new_head_mask.view(-1 ) lowerCamelCase__ : Optional[Any] =0.0 lowerCamelCase__ : Dict =new_head_mask.view_as(__lowerCamelCase ) lowerCamelCase__ : Tuple =new_head_mask.clone().detach() print_ad_tensor(__lowerCamelCase ) # Compute metric and head importance again lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =compute_heads_importance( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase , head_mask=__lowerCamelCase ) lowerCamelCase__ : Any =1 / loss logger.info( '''Masking: current score: %f, remaining heads %d (%.1f percents)''' , __lowerCamelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , ) logger.info('''Final head mask''' ) print_ad_tensor(__lowerCamelCase ) np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() ) return head_mask def snake_case__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[Any] ): """simple docstring""" lowerCamelCase__ : str =datetime.now() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] =compute_heads_importance( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase , compute_importance=__lowerCamelCase , head_mask=__lowerCamelCase ) lowerCamelCase__ : Tuple =1 / loss lowerCamelCase__ : Optional[Any] =datetime.now() - before_time lowerCamelCase__ : int =sum(p.numel() for p in model.parameters() ) lowerCamelCase__ : Any ={ layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in 
range(len(__lowerCamelCase ) ) } for k, v in heads_to_prune.items(): if isinstance(__lowerCamelCase , __lowerCamelCase ): lowerCamelCase__ : Optional[int] =[ v, ] assert sum(len(__lowerCamelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(__lowerCamelCase ) lowerCamelCase__ : List[str] =sum(p.numel() for p in model.parameters() ) lowerCamelCase__ : Any =datetime.now() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict =compute_heads_importance( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase , compute_importance=__lowerCamelCase , head_mask=__lowerCamelCase , actually_pruned=__lowerCamelCase , ) lowerCamelCase__ : str =1 / loss lowerCamelCase__ : Union[str, Any] =datetime.now() - before_time logger.info( '''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , __lowerCamelCase , __lowerCamelCase , pruned_num_params / original_num_params * 100 , ) logger.info('''Pruning: score with masking: %f score with pruning: %f''' , __lowerCamelCase , __lowerCamelCase ) logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 100 ) save_model(__lowerCamelCase , args.output_dir ) def snake_case__ ( ): """simple docstring""" lowerCamelCase__ : Optional[int] =argparse.ArgumentParser() # Required parameters parser.add_argument( '''--data_dir''' , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help='''The input data dir. 
Should contain the .tsv files (or other data files) for the task.''' , ) parser.add_argument( '''--model_name_or_path''' , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--output_dir''' , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help='''The output directory where the model predictions and checkpoints will be written.''' , ) # Other parameters parser.add_argument( '''--config_name''' , default='''''' , type=__lowerCamelCase , help='''Pretrained config name or path if not the same as model_name_or_path''' , ) parser.add_argument( '''--tokenizer_name''' , default='''''' , type=__lowerCamelCase , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , ) parser.add_argument( '''--cache_dir''' , default=__lowerCamelCase , type=__lowerCamelCase , help='''Where do you want to store the pre-trained models downloaded from s3''' , ) parser.add_argument( '''--data_subset''' , type=__lowerCamelCase , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' ) parser.add_argument( '''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' ) parser.add_argument( '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' ) parser.add_argument( '''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' ) parser.add_argument( '''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , ) parser.add_argument( '''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' ) parser.add_argument( '''--masking_threshold''' , default=0.9 , 
type=__lowerCamelCase , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , ) parser.add_argument( '''--masking_amount''' , default=0.1 , type=__lowerCamelCase , help='''Amount to heads to masking at each masking step.''' ) parser.add_argument('''--metric_name''' , default='''acc''' , type=__lowerCamelCase , help='''Metric to use for head masking.''' ) parser.add_argument( '''--max_seq_length''' , default=128 , type=__lowerCamelCase , help=( '''The maximum total input sequence length after WordPiece tokenization. \n''' '''Sequences longer than this will be truncated, sequences shorter padded.''' ) , ) parser.add_argument('''--batch_size''' , default=1 , type=__lowerCamelCase , help='''Batch size.''' ) parser.add_argument('''--seed''' , type=__lowerCamelCase , default=42 ) parser.add_argument('''--local_rank''' , type=__lowerCamelCase , default=-1 , help='''local_rank for distributed training on gpus''' ) parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' ) parser.add_argument('''--server_ip''' , type=__lowerCamelCase , default='''''' , help='''Can be used for distant debugging.''' ) parser.add_argument('''--server_port''' , type=__lowerCamelCase , default='''''' , help='''Can be used for distant debugging.''' ) lowerCamelCase__ : List[Any] =parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('''Waiting for debugger attach''' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowerCamelCase ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: lowerCamelCase__ : Dict =torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' ) lowerCamelCase__ : Dict =0 if args.no_cuda else 
torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) lowerCamelCase__ : str =torch.device('''cuda''' , args.local_rank ) lowerCamelCase__ : Any =1 torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) lowerCamelCase__ : Union[str, Any] =GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: lowerCamelCase__ : List[Any] =nn.parallel.DistributedDataParallel( __lowerCamelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__lowerCamelCase ) elif args.n_gpu > 1: lowerCamelCase__ : int =nn.DataParallel(__lowerCamelCase ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=__lowerCamelCase ) torch.save(__lowerCamelCase , os.path.join(args.output_dir , '''run_args.bin''' ) ) logger.info('''Training/evaluation parameters %s''' , __lowerCamelCase ) # Prepare dataset lowerCamelCase__ : Union[str, Any] =np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) lowerCamelCase__ : Any =(torch.from_numpy(__lowerCamelCase ),) lowerCamelCase__ : List[Any] =TensorDataset(*__lowerCamelCase ) lowerCamelCase__ : List[str] =RandomSampler(__lowerCamelCase ) lowerCamelCase__ : Dict =DataLoader(__lowerCamelCase , sampler=__lowerCamelCase , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: lowerCamelCase__ : 
Optional[int] =mask_heads(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) prune_heads(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if __name__ == "__main__": main()
625
1
"""Slow integration tests for the Flax Stable Diffusion ControlNet pipeline."""
import gc
import unittest

from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class __SCREAMING_SNAKE_CASE(unittest.TestCase):
    """End-to-end checks of FlaxStableDiffusionControlNetPipeline against
    pinned output slices, with canny-edge and openpose conditioning.

    NOTE(review): the obfuscated source gave all three methods one shared name
    (shadowing each other) and bound every local to a single placeholder while
    later statements read the intended names; identifiers below were restored
    from those read sites. Test method names are inferred — confirm.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
625
"""Convert a T5X (JAX) T5 / LongT5 checkpoint into a Flax Transformers model."""
import argparse

from t5x import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    """Load a T5X checkpoint and copy its weights into a Flax model.

    Args:
        t5x_checkpoint_path: directory of the T5X checkpoint to read.
        config_name: Hub id / path of the matching T5 or LongT5 config.
        flax_dump_folder_path: output directory for ``save_pretrained``.

    Raises:
        ValueError: for unsupported model types / encoder attention types.
    """
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)

    # v1.1-style checkpoints split the MLP input projection into wi_0 / wi_1
    split_mlp_wi = "wi_0" in t5x_model["target"]["encoder"]["layers_0"]["mlp"]

    # BUGFIX: the second branch was a bare `if`, so plain `model_type == "t5"`
    # fell through to the `else` and raised despite being a supported type.
    if config.model_type == "t5":
        encoder_attn_name = "SelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global]."
        )

    # NOTE(review): the obfuscated source destroyed every assignment target;
    # local names below were restored from their read sites, and the Flax
    # parameter paths follow the FlaxT5/FlaxLongT5 module layout — verify
    # against `flax_model.params` before trusting a converted checkpoint.

    # Encoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm (transient-global attention only)
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            t5x_global_layer_norm = t5x_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        t5x_attention_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
        flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = t5x_attention_key
        flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = t5x_attention_out
        flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = t5x_attention_query
        flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = t5x_attention_value
        flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = t5x_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"][
                "weight"
            ] = t5x_global_layer_norm

        if split_mlp_wi:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi
        flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

    # Only for layer 0: the relative position bias lives on the first block
    t5x_encoder_rel_embedding = t5x_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"][
        "embedding"
    ] = t5x_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        t5x_encoder_global_rel_embedding = t5x_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name][
            "global_relative_attention_bias"
        ]["embedding"] = t5x_encoder_global_rel_embedding

    # Final encoder layer norm
    t5x_encoder_norm = t5x_model["target"]["encoder"]["encoder_norm"]["scale"]
    flax_model.params["encoder"]["final_layer_norm"]["weight"] = t5x_encoder_norm

    # Decoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]

        # Layer Normalization
        t5x_pre_attention_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]

        # Encoder-Decoder-Attention
        t5x_enc_dec_attention_module = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        t5x_enc_dec_attention_key = t5x_enc_dec_attention_module["key"]["kernel"]
        t5x_enc_dec_attention_out = t5x_enc_dec_attention_module["out"]["kernel"]
        t5x_enc_dec_attention_query = t5x_enc_dec_attention_module["query"]["kernel"]
        t5x_enc_dec_attention_value = t5x_enc_dec_attention_module["value"]["kernel"]

        # Layer Normalization
        t5x_cross_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]

        # MLP
        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
        flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = t5x_attention_key
        flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = t5x_attention_out
        flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = t5x_attention_query
        flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = t5x_attention_value
        flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = t5x_pre_attention_layer_norm

        flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = t5x_enc_dec_attention_key
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = t5x_enc_dec_attention_out
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = t5x_enc_dec_attention_query
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = t5x_enc_dec_attention_value
        flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = t5x_cross_layer_norm

        if split_mlp_wi:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi
        flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

    # Decoder Normalization
    t5x_decoder_norm = t5x_model["target"]["decoder"]["decoder_norm"]["scale"]
    flax_model.params["decoder"]["final_layer_norm"]["weight"] = t5x_decoder_norm

    # Only for layer 0:
    t5x_decoder_rel_embedding = t5x_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][
        "embedding"
    ] = t5x_decoder_rel_embedding

    # Token Embeddings
    t5x_token_embeddings = t5x_model["target"]["token_embedder"]["embedding"]
    flax_model.params["shared"]["embedding"] = t5x_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in t5x_model["target"]["decoder"]:
        flax_model.params["lm_head"]["kernel"] = t5x_model["target"]["decoder"]["logits_dense"]["kernel"]

    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was sucessfully converted!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
    )
    parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
    parser.add_argument(
        "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
    )
    args = parser.parse_args()
    # BUGFIX: argparse exposes `--t5x_checkpoint_path` as `args.t5x_checkpoint_path`
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
625
1
"""simple docstring""" from copy import deepcopy class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : List[str], lowerCamelCase : list[int] | None = None, lowerCamelCase : int | None = None )-> None: if arr is None and size is not None: lowerCamelCase__ : Any =size lowerCamelCase__ : Optional[int] =[0] * size elif arr is not None: self.init(lowerCamelCase ) else: raise ValueError('''Either arr or size must be specified''' ) def snake_case ( self : Union[str, Any], lowerCamelCase : list[int] )-> None: lowerCamelCase__ : Tuple =len(lowerCamelCase ) lowerCamelCase__ : int =deepcopy(lowerCamelCase ) for i in range(1, self.size ): lowerCamelCase__ : Dict =self.next_(lowerCamelCase ) if j < self.size: self.tree[j] += self.tree[i] def snake_case ( self : Optional[int] )-> list[int]: lowerCamelCase__ : List[str] =self.tree[:] for i in range(self.size - 1, 0, -1 ): lowerCamelCase__ : Optional[Any] =self.next_(lowerCamelCase ) if j < self.size: arr[j] -= arr[i] return arr @staticmethod def snake_case ( lowerCamelCase : int )-> int: return index + (index & (-index)) @staticmethod def snake_case ( lowerCamelCase : int )-> int: return index - (index & (-index)) def snake_case ( self : Any, lowerCamelCase : int, lowerCamelCase : int )-> None: if index == 0: self.tree[0] += value return while index < self.size: self.tree[index] += value lowerCamelCase__ : List[str] =self.next_(lowerCamelCase ) def snake_case ( self : List[str], lowerCamelCase : int, lowerCamelCase : int )-> None: self.add(lowerCamelCase, value - self.get(lowerCamelCase ) ) def snake_case ( self : Union[str, Any], lowerCamelCase : int )-> int: if right == 0: return 0 lowerCamelCase__ : Dict =self.tree[0] right -= 1 # make right inclusive while right > 0: result += self.tree[right] lowerCamelCase__ : Union[str, Any] =self.prev(lowerCamelCase ) return result def snake_case ( self : str, lowerCamelCase : int, lowerCamelCase : int )-> int: return self.prefix(lowerCamelCase ) - 
self.prefix(lowerCamelCase ) def snake_case ( self : List[str], lowerCamelCase : int )-> int: return self.query(lowerCamelCase, index + 1 ) def snake_case ( self : List[Any], lowerCamelCase : int )-> int: value -= self.tree[0] if value < 0: return -1 lowerCamelCase__ : Optional[int] =1 # Largest power of 2 <= size while j * 2 < self.size: j *= 2 lowerCamelCase__ : Union[str, Any] =0 while j > 0: if i + j < self.size and self.tree[i + j] <= value: value -= self.tree[i + j] i += j j //= 2 return i if __name__ == "__main__": import doctest doctest.testmod()
625
"""PyTorch UperNet model tests (ConvNeXt backbone) — identifiers restored from obfuscated source."""
import inspect
import unittest

from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import UperNetForSemanticSegmentation
    from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class UperNetModelTester:
    """Builds a tiny ConvNeXt-backed UperNet config and matching dummy inputs.

    NOTE(review): the obfuscated source bound every attribute to one
    placeholder name and gave all methods the same name; identifiers were
    restored from their read sites (``UperNetModelTester(self)`` in ``setUp``
    grounds the class name). Some test-method names below are inferred —
    confirm against the transformers test-suite conventions.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        # hidden-state count mirrors the number of backbone stages
        self.num_hidden_layers = num_stages

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,  # NOTE(review): boolean was obfuscated — confirm
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,  # NOTE(review): boolean was obfuscated — confirm
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # common-property checks do not apply to UperNet
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Download and return the ADE20k fixture image used by the integration tests."""
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image


@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)
        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)
        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
1
"""Convert a Swin SimMIM pre-training checkpoint into a Transformers SwinForMaskedImageModeling model."""
import argparse

import requests
import torch
from PIL import Image

from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor


def get_swin_config(model_name):
    """Build a SwinConfig (192px) for the 'base' or 'large' SimMIM variant named in model_name."""
    config = SwinConfig(image_size=192)

    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config


def rename_key(name):
    """Map an original SimMIM parameter name onto the Transformers Swin naming scheme."""
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"

    if "decoder" in name:
        pass  # decoder weights are not part of the HF model
    else:
        name = "swin." + name

    return name


def convert_state_dict(orig_state_dict, model):
    """Rewrite a SimMIM state dict in place: split fused qkv tensors and rename all keys.

    NOTE(review): the obfuscated source destroyed the assignment targets of the
    qkv split; the target key strings below follow the Transformers Swin
    attention layout (query/key/value) — verify against the model's state dict.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            pass  # attention masks are buffers recomputed by the HF model
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[
                    :dim
                ]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[
                    dim : dim * 2
                ]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[
                    -dim:
                ]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    """Load the original .pth checkpoint, convert it, sanity-check a forward pass,
    and optionally save / push the converted model and image processor."""
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    with torch.no_grad():
        # BUGFIX: the source took `.logits` (a Tensor) and then called
        # `.keys()` on it; keep the full ModelOutput so `.keys()` is valid.
        outputs = model(**inputs)

    print(outputs.keys())
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="swin-base-simmim-window6-192",
        type=str,
        choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
        help="Name of the Swin SimMIM model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path",
        default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
625
"""Placeholder object raised-in-place-of real classes when the `onnx` backend is unavailable."""
from ..utils import DummyObject, requires_backends


class __SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    # BUGFIX: the metaclass referenced an undefined placeholder name; the
    # imported DummyObject is the metaclass this shim pattern relies on.
    # NOTE(review): `_backends` and the two classmethod names were restored
    # per the dummy-object convention (the source had `_a` and two
    # classmethods sharing one name, the second shadowing the first) — confirm.
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
625
1
"""Thin wrapper around huggingface_hub.hf_hub_url for dataset repositories."""
from typing import Optional
from urllib.parse import quote

import huggingface_hub as hfh
from packaging import version


def snake_case__(path: str, name: str, revision: Optional[str] = None):
    """Return the Hub URL of file ``name`` inside dataset repo ``path``.

    BUGFIX: the source declared all three parameters with the same
    placeholder name (a SyntaxError); names were restored from how the
    values are forwarded to ``hfh.hf_hub_url``.
    """
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(path, name, repo_type="dataset", revision=revision)
625
"""Render the Mandelbrot set as a PIL image, colour-coded by escape time."""
import colorsys

from PIL import Image  # type: ignore


def get_distance(x: float, y: float, max_step: int) -> float:
    """Return a normalised escape-time in [0, 1] for the complex point x + y*i.

    1.0 means the orbit stayed bounded for all iterations (treated as inside
    the set).
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """Black for points inside the set, white for everything else."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """Black inside the set; otherwise map the escape time onto a hue."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
):
    """Render the Mandelbrot set into a new RGB image and return it.

    NOTE(review): the source bound all four functions to one shared name and
    destroyed the pixel-assignment targets; names were restored from the call
    sites and the ``img.load()`` pixel-access idiom.
    """
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # figure height is fixed by the aspect ratio — hoisted out of the loops
    figure_height = figure_width / image_width * image_height

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
625
1
"""simple docstring""" from __future__ import annotations def snake_case__ ( __lowerCamelCase : float , __lowerCamelCase : float , __lowerCamelCase : float , ): """simple docstring""" if (stress, tangential_force, area).count(0 ) != 1: raise ValueError('''You cannot supply more or less than 2 values''' ) elif stress < 0: raise ValueError('''Stress cannot be negative''' ) elif tangential_force < 0: raise ValueError('''Tangential Force cannot be negative''' ) elif area < 0: raise ValueError('''Area cannot be negative''' ) elif stress == 0: return ( "stress", tangential_force / area, ) elif tangential_force == 0: return ( "tangential_force", stress * area, ) else: return ( "area", tangential_force / stress, ) if __name__ == "__main__": import doctest doctest.testmod()
625
"""simple docstring""" import argparse import json import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( VideoMAEConfig, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEImageProcessor, ) def snake_case__ ( __lowerCamelCase : Optional[Any] ): """simple docstring""" lowerCamelCase__ : str =VideoMAEConfig() set_architecture_configs(__lowerCamelCase , __lowerCamelCase ) if "finetuned" not in model_name: lowerCamelCase__ : int =False if "finetuned" in model_name: lowerCamelCase__ : str ='''huggingface/label-files''' if "kinetics" in model_name: lowerCamelCase__ : List[Any] =400 lowerCamelCase__ : Optional[int] ='''kinetics400-id2label.json''' elif "ssv2" in model_name: lowerCamelCase__ : Tuple =174 lowerCamelCase__ : Optional[Any] ='''something-something-v2-id2label.json''' else: raise ValueError('''Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.''' ) lowerCamelCase__ : Optional[int] =json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) ) lowerCamelCase__ : List[Any] ={int(__lowerCamelCase ): v for k, v in idalabel.items()} lowerCamelCase__ : Dict =idalabel lowerCamelCase__ : Any ={v: k for k, v in idalabel.items()} return config def snake_case__ ( __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any] ): """simple docstring""" if "small" in model_name: lowerCamelCase__ : Optional[Any] =384 lowerCamelCase__ : List[Any] =1536 lowerCamelCase__ : int =12 lowerCamelCase__ : Dict =16 lowerCamelCase__ : List[Any] =12 lowerCamelCase__ : Optional[Any] =3 lowerCamelCase__ : Union[str, Any] =192 lowerCamelCase__ : str =768 elif "large" in model_name: lowerCamelCase__ : Union[str, Any] =1024 lowerCamelCase__ : str =4096 lowerCamelCase__ : int =24 lowerCamelCase__ : Dict =16 lowerCamelCase__ : Union[str, Any] =12 lowerCamelCase__ : List[Any] =8 lowerCamelCase__ : int =512 lowerCamelCase__ : Optional[Any] 
=2048 elif "huge" in model_name: lowerCamelCase__ : Optional[int] =1280 lowerCamelCase__ : Optional[int] =5120 lowerCamelCase__ : List[Any] =32 lowerCamelCase__ : List[Any] =16 lowerCamelCase__ : Optional[Any] =12 lowerCamelCase__ : Dict =8 lowerCamelCase__ : List[Any] =640 lowerCamelCase__ : Any =2560 elif "base" not in model_name: raise ValueError('''Model name should include either "small", "base", "large", or "huge"''' ) def snake_case__ ( __lowerCamelCase : Any ): """simple docstring""" if "encoder." in name: lowerCamelCase__ : Optional[int] =name.replace('''encoder.''' , '''''' ) if "cls_token" in name: lowerCamelCase__ : List[Any] =name.replace('''cls_token''' , '''videomae.embeddings.cls_token''' ) if "decoder_pos_embed" in name: lowerCamelCase__ : Tuple =name.replace('''decoder_pos_embed''' , '''decoder.decoder_pos_embed''' ) if "pos_embed" in name and "decoder" not in name: lowerCamelCase__ : Any =name.replace('''pos_embed''' , '''videomae.embeddings.position_embeddings''' ) if "patch_embed.proj" in name: lowerCamelCase__ : Optional[Any] =name.replace('''patch_embed.proj''' , '''videomae.embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: lowerCamelCase__ : List[Any] =name.replace('''patch_embed.norm''' , '''videomae.embeddings.norm''' ) if "decoder.blocks" in name: lowerCamelCase__ : Tuple =name.replace('''decoder.blocks''' , '''decoder.decoder_layers''' ) if "blocks" in name: lowerCamelCase__ : Dict =name.replace('''blocks''' , '''videomae.encoder.layer''' ) if "attn.proj" in name: lowerCamelCase__ : Union[str, Any] =name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name and "bias" not in name: lowerCamelCase__ : List[str] =name.replace('''attn''' , '''attention.self''' ) if "attn" in name: lowerCamelCase__ : Union[str, Any] =name.replace('''attn''' , '''attention.attention''' ) if "norm1" in name: lowerCamelCase__ : Tuple =name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: 
lowerCamelCase__ : Optional[int] =name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: lowerCamelCase__ : List[Any] =name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: lowerCamelCase__ : int =name.replace('''mlp.fc2''' , '''output.dense''' ) if "decoder_embed" in name: lowerCamelCase__ : Any =name.replace('''decoder_embed''' , '''decoder.decoder_embed''' ) if "decoder_norm" in name: lowerCamelCase__ : Optional[Any] =name.replace('''decoder_norm''' , '''decoder.decoder_norm''' ) if "decoder_pred" in name: lowerCamelCase__ : Any =name.replace('''decoder_pred''' , '''decoder.decoder_pred''' ) if "norm.weight" in name and "decoder" not in name and "fc" not in name: lowerCamelCase__ : str =name.replace('''norm.weight''' , '''videomae.layernorm.weight''' ) if "norm.bias" in name and "decoder" not in name and "fc" not in name: lowerCamelCase__ : Optional[int] =name.replace('''norm.bias''' , '''videomae.layernorm.bias''' ) if "head" in name and "decoder" not in name: lowerCamelCase__ : List[str] =name.replace('''head''' , '''classifier''' ) return name def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : int ): """simple docstring""" for key in orig_state_dict.copy().keys(): lowerCamelCase__ : Dict =orig_state_dict.pop(__lowerCamelCase ) if key.startswith('''encoder.''' ): lowerCamelCase__ : Optional[int] =key.replace('''encoder.''' , '''''' ) if "qkv" in key: lowerCamelCase__ : Any =key.split('''.''' ) if key.startswith('''decoder.blocks''' ): lowerCamelCase__ : Tuple =config.decoder_hidden_size lowerCamelCase__ : str =int(key_split[2] ) lowerCamelCase__ : Any ='''decoder.decoder_layers.''' if "weight" in key: lowerCamelCase__ : List[Any] =val[:dim, :] lowerCamelCase__ : Any =val[dim : dim * 2, :] lowerCamelCase__ : Dict =val[-dim:, :] else: lowerCamelCase__ : Optional[Any] =config.hidden_size lowerCamelCase__ : Optional[Any] =int(key_split[1] ) lowerCamelCase__ : str ='''videomae.encoder.layer.''' if "weight" in 
key: lowerCamelCase__ : int =val[:dim, :] lowerCamelCase__ : Tuple =val[dim : dim * 2, :] lowerCamelCase__ : List[Any] =val[-dim:, :] else: lowerCamelCase__ : int =val return orig_state_dict def snake_case__ ( ): """simple docstring""" lowerCamelCase__ : List[Any] =hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' ) lowerCamelCase__ : Optional[Any] =np.load(__lowerCamelCase ) return list(__lowerCamelCase ) def snake_case__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] ): """simple docstring""" lowerCamelCase__ : str =get_videomae_config(__lowerCamelCase ) if "finetuned" in model_name: lowerCamelCase__ : Tuple =VideoMAEForVideoClassification(__lowerCamelCase ) else: lowerCamelCase__ : int =VideoMAEForPreTraining(__lowerCamelCase ) # download original checkpoint, hosted on Google Drive lowerCamelCase__ : Union[str, Any] ='''pytorch_model.bin''' gdown.cached_download(__lowerCamelCase , __lowerCamelCase , quiet=__lowerCamelCase ) lowerCamelCase__ : Optional[Any] =torch.load(__lowerCamelCase , map_location='''cpu''' ) if "model" in files: lowerCamelCase__ : Dict =files['''model'''] else: lowerCamelCase__ : str =files['''module'''] lowerCamelCase__ : Optional[Any] =convert_state_dict(__lowerCamelCase , __lowerCamelCase ) model.load_state_dict(__lowerCamelCase ) model.eval() # verify model on basic input lowerCamelCase__ : Dict =VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) lowerCamelCase__ : int =prepare_video() lowerCamelCase__ : Tuple =image_processor(__lowerCamelCase , return_tensors='''pt''' ) if "finetuned" not in model_name: lowerCamelCase__ : Tuple =hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' ) lowerCamelCase__ : Union[str, Any] =torch.load(__lowerCamelCase ) lowerCamelCase__ : int 
=model(**__lowerCamelCase ) lowerCamelCase__ : Dict =outputs.logits lowerCamelCase__ : List[str] =[ '''videomae-small-finetuned-kinetics''', '''videomae-small-finetuned-ssv2''', # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600) '''videomae-base-short''', '''videomae-base-short-finetuned-kinetics''', '''videomae-base''', '''videomae-base-finetuned-kinetics''', '''videomae-large''', '''videomae-large-finetuned-kinetics''', '''videomae-huge-finetuned-kinetics''', # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400) '''videomae-base-short-ssv2''', '''videomae-base-short-finetuned-ssv2''', '''videomae-base-ssv2''', '''videomae-base-finetuned-ssv2''', ] # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5] if model_name == "videomae-small-finetuned-kinetics": lowerCamelCase__ : Union[str, Any] =torch.Size([1, 400] ) lowerCamelCase__ : str =torch.tensor([-0.92_91, -0.40_61, -0.93_07] ) elif model_name == "videomae-small-finetuned-ssv2": lowerCamelCase__ : int =torch.Size([1, 174] ) lowerCamelCase__ : Dict =torch.tensor([0.26_71, -0.46_89, -0.82_35] ) elif model_name == "videomae-base": lowerCamelCase__ : List[str] =torch.Size([1, 1408, 1536] ) lowerCamelCase__ : Dict =torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]] ) elif model_name == "videomae-base-short": lowerCamelCase__ : List[Any] =torch.Size([1, 1408, 1536] ) lowerCamelCase__ : List[str] =torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] ) # we verified the loss both for normalized and unnormalized targets for this one lowerCamelCase__ : str =torch.tensor([0.51_42] ) if config.norm_pix_loss else torch.tensor([0.64_69] ) elif model_name == "videomae-large": lowerCamelCase__ : Union[str, Any] =torch.Size([1, 1408, 1536] ) lowerCamelCase__ : List[Any] =torch.tensor([[0.71_49, 0.79_97, 0.69_66], 
[0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]] ) elif model_name == "videomae-large-finetuned-kinetics": lowerCamelCase__ : Any =torch.Size([1, 400] ) lowerCamelCase__ : str =torch.tensor([0.07_71, 0.00_11, -0.36_25] ) elif model_name == "videomae-huge-finetuned-kinetics": lowerCamelCase__ : Any =torch.Size([1, 400] ) lowerCamelCase__ : Optional[int] =torch.tensor([0.24_33, 0.16_32, -0.48_94] ) elif model_name == "videomae-base-short-finetuned-kinetics": lowerCamelCase__ : List[str] =torch.Size([1, 400] ) lowerCamelCase__ : Dict =torch.tensor([0.65_88, 0.09_90, -0.24_93] ) elif model_name == "videomae-base-finetuned-kinetics": lowerCamelCase__ : str =torch.Size([1, 400] ) lowerCamelCase__ : Any =torch.tensor([0.36_69, -0.06_88, -0.24_21] ) elif model_name == "videomae-base-short-ssv2": lowerCamelCase__ : Tuple =torch.Size([1, 1408, 1536] ) lowerCamelCase__ : Dict =torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]] ) elif model_name == "videomae-base-short-finetuned-ssv2": lowerCamelCase__ : Optional[int] =torch.Size([1, 174] ) lowerCamelCase__ : Any =torch.tensor([-0.05_37, -0.15_39, -0.32_66] ) elif model_name == "videomae-base-ssv2": lowerCamelCase__ : Dict =torch.Size([1, 1408, 1536] ) lowerCamelCase__ : str =torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]] ) elif model_name == "videomae-base-finetuned-ssv2": lowerCamelCase__ : str =torch.Size([1, 174] ) lowerCamelCase__ : int =torch.tensor([0.19_61, -0.83_37, -0.63_89] ) else: raise ValueError(f'''Model name not supported. 
Should be one of {model_names}''' ) # verify logits assert logits.shape == expected_shape if "finetuned" in model_name: assert torch.allclose(logits[0, :3] , __lowerCamelCase , atol=1e-4 ) else: print('''Logits:''' , logits[0, :3, :3] ) assert torch.allclose(logits[0, :3, :3] , __lowerCamelCase , atol=1e-4 ) print('''Logits ok!''' ) # verify loss, if applicable if model_name == "videomae-base-short": lowerCamelCase__ : str =outputs.loss assert torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-4 ) print('''Loss ok!''' ) if pytorch_dump_folder_path is not None: print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__lowerCamelCase ) model.save_pretrained(__lowerCamelCase ) if push_to_hub: print('''Pushing to the hub...''' ) model.push_to_hub(__lowerCamelCase , organization='''nielsr''' ) if __name__ == "__main__": _lowercase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&amp;export=download&amp;confirm=t&amp;uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4", type=str, help=( "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct" " download link." ), ) parser.add_argument( "--pytorch_dump_folder_path", default="/Users/nielsrogge/Documents/VideoMAE/Test", type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) _lowercase : Union[str, Any] = parser.parse_args() convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
625
1
"""simple docstring""" import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def snake_case__ ( __lowerCamelCase : List[Any] ): """simple docstring""" if isinstance(__lowerCamelCase , collections.abc.Iterable ): return x return (x, x) @require_flax class __SCREAMING_SNAKE_CASE : '''simple docstring''' def snake_case ( self : Dict, lowerCamelCase : List[str], lowerCamelCase : Any )-> Union[str, Any]: pass def snake_case ( self : List[str] )-> List[str]: pass def snake_case ( self : Optional[Any] )-> str: pass def snake_case ( self : Union[str, Any], lowerCamelCase : np.ndarray, lowerCamelCase : np.ndarray, lowerCamelCase : float )-> Dict: lowerCamelCase__ : Union[str, Any] =np.abs((a - b) ).max() self.assertLessEqual(lowerCamelCase, lowerCamelCase, F'''Difference between torch and flax is {diff} (>= {tol}).''' ) def snake_case ( self : Dict, lowerCamelCase : Tuple, lowerCamelCase : Any, lowerCamelCase : List[str], lowerCamelCase : Dict, lowerCamelCase : Any=None, **lowerCamelCase : str )-> int: 
lowerCamelCase__ : List[str] =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : Tuple =FlaxVisionTextDualEncoderModel(lowerCamelCase ) lowerCamelCase__ : Dict =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase ) self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], config.projection_dim) ) def snake_case ( self : Any, lowerCamelCase : int, lowerCamelCase : Optional[Any], lowerCamelCase : List[str], lowerCamelCase : Union[str, Any], lowerCamelCase : str=None, **lowerCamelCase : List[Any] )-> int: lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : Union[str, Any] ={'''vision_model''': vision_model, '''text_model''': text_model} lowerCamelCase__ : Tuple =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase ) lowerCamelCase__ : Optional[int] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase ) self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], model.config.projection_dim) ) def snake_case ( self : Any, lowerCamelCase : Dict, lowerCamelCase : Dict, lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Dict=None, **lowerCamelCase : int )-> List[str]: lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : Optional[int] ={'''vision_model''': vision_model, '''text_model''': text_model} lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase ) lowerCamelCase__ : List[Any] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, 
attention_mask=lowerCamelCase ) lowerCamelCase__ : int =output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCamelCase ) lowerCamelCase__ : Dict =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase ) lowerCamelCase__ : Optional[int] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase ) lowerCamelCase__ : List[str] =after_output[0] lowerCamelCase__ : Any =np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCamelCase, 1E-3 ) def snake_case ( self : Optional[Any], lowerCamelCase : Dict, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : List[Any]=None, **lowerCamelCase : List[Any] )-> Tuple: lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : Any ={'''vision_model''': vision_model, '''text_model''': text_model} lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase ) lowerCamelCase__ : List[str] =model( input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase, output_attentions=lowerCamelCase ) lowerCamelCase__ : int =output.vision_model_output.attentions self.assertEqual(len(lowerCamelCase ), vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) lowerCamelCase__ : Tuple =to_atuple(vision_model.config.image_size ) lowerCamelCase__ : Optional[Any] =to_atuple(vision_model.config.patch_size ) lowerCamelCase__ : Union[str, Any] =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) lowerCamelCase__ : int =num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len) ) lowerCamelCase__ : List[Any] =output.text_model_output.attentions self.assertEqual(len(lowerCamelCase ), text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:], 
(text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), ) def snake_case ( self : Tuple, lowerCamelCase : Optional[int], lowerCamelCase : Any, lowerCamelCase : Union[str, Any] )-> Any: pt_model.to(lowerCamelCase ) pt_model.eval() # prepare inputs lowerCamelCase__ : Any =inputs_dict lowerCamelCase__ : Any ={k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): lowerCamelCase__ : List[str] =pt_model(**lowerCamelCase ).to_tuple() lowerCamelCase__ : Optional[Any] =fx_model(**lowerCamelCase ).to_tuple() self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4] ): self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowerCamelCase ) lowerCamelCase__ : Optional[int] =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_pt=lowerCamelCase ) lowerCamelCase__ : List[Any] =fx_model_loaded(**lowerCamelCase ).to_tuple() self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4] ): self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowerCamelCase ) lowerCamelCase__ : str =VisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_flax=lowerCamelCase ) pt_model_loaded.to(lowerCamelCase ) pt_model_loaded.eval() with torch.no_grad(): lowerCamelCase__ : List[Any] =pt_model_loaded(**lowerCamelCase ).to_tuple() self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4] ): self.assert_almost_equals(lowerCamelCase, 
pt_output_loaded.numpy(), 4E-2 ) def snake_case ( self : str, lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[Any], lowerCamelCase : str )-> List[Any]: lowerCamelCase__ : Any =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : List[Any] =VisionTextDualEncoderModel(lowerCamelCase ) lowerCamelCase__ : List[str] =FlaxVisionTextDualEncoderModel(lowerCamelCase ) lowerCamelCase__ : str =convert_pytorch_state_dict_to_flax(pt_model.state_dict(), lowerCamelCase ) lowerCamelCase__ : Tuple =fx_state self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase ) def snake_case ( self : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Union[str, Any] )-> Optional[int]: lowerCamelCase__ : Dict =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : Tuple =VisionTextDualEncoderModel(lowerCamelCase ) lowerCamelCase__ : List[str] =FlaxVisionTextDualEncoderModel(lowerCamelCase ) lowerCamelCase__ : Tuple =load_flax_weights_in_pytorch_model(lowerCamelCase, fx_model.params ) self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase ) def snake_case ( self : Optional[int] )-> Union[str, Any]: lowerCamelCase__ : Any =self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**lowerCamelCase ) def snake_case ( self : Tuple )-> int: lowerCamelCase__ : int =self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**lowerCamelCase ) def snake_case ( self : Tuple )-> Any: lowerCamelCase__ : Tuple =self.prepare_config_and_inputs() self.check_save_load(**lowerCamelCase ) def snake_case ( self : str )-> Any: lowerCamelCase__ : str =self.prepare_config_and_inputs() self.check_vision_text_output_attention(**lowerCamelCase ) @is_pt_flax_cross_test def snake_case ( self : Tuple )-> List[Any]: lowerCamelCase__ : Union[str, Any] =self.prepare_config_and_inputs() 
lowerCamelCase__ : Union[str, Any] =config_inputs_dict.pop('''vision_config''' ) lowerCamelCase__ : Optional[Any] =config_inputs_dict.pop('''text_config''' ) lowerCamelCase__ : Tuple =config_inputs_dict self.check_equivalence_pt_to_flax(lowerCamelCase, lowerCamelCase, lowerCamelCase ) self.check_equivalence_flax_to_pt(lowerCamelCase, lowerCamelCase, lowerCamelCase ) @slow def snake_case ( self : Optional[Any] )-> Tuple: lowerCamelCase__ , lowerCamelCase__ : Dict =self.get_pretrained_model_and_inputs() lowerCamelCase__ : Optional[int] =model_a(**lowerCamelCase ) lowerCamelCase__ : List[str] =outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(lowerCamelCase ) lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase ) lowerCamelCase__ : Union[str, Any] =model_a(**lowerCamelCase ) lowerCamelCase__ : List[Any] =after_outputs[0] lowerCamelCase__ : Any =np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCamelCase, 1E-5 ) @require_flax class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' def snake_case ( self : Optional[int] )-> Optional[Any]: lowerCamelCase__ : str =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( '''hf-internal-testing/tiny-random-vit''', '''hf-internal-testing/tiny-bert''', vision_from_pt=lowerCamelCase, text_from_pt=lowerCamelCase, ) lowerCamelCase__ : Union[str, Any] =13 lowerCamelCase__ : List[str] =floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) lowerCamelCase__ : List[str] =ids_tensor([batch_size, 4], model.config.text_config.vocab_size ) lowerCamelCase__ : Optional[int] =random_attention_mask([batch_size, 4] ) lowerCamelCase__ : Any ={'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def snake_case ( self : str, lowerCamelCase : str, lowerCamelCase : int 
)-> int: lowerCamelCase__ : str =FlaxViTModel(lowerCamelCase ) lowerCamelCase__ : Any =FlaxBertModel(lowerCamelCase ) return vision_model, text_model def snake_case ( self : int )-> Optional[int]: lowerCamelCase__ : Any =FlaxViTModelTester(self ) lowerCamelCase__ : Union[str, Any] =FlaxBertModelTester(self ) lowerCamelCase__ : Any =vit_model_tester.prepare_config_and_inputs() lowerCamelCase__ : Optional[Any] =bert_model_tester.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ : Any =vision_config_and_inputs lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple =text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' def snake_case ( self : Optional[int] )-> Optional[int]: lowerCamelCase__ : Union[str, Any] =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( '''hf-internal-testing/tiny-random-clip''', '''hf-internal-testing/tiny-bert''', vision_from_pt=lowerCamelCase, text_from_pt=lowerCamelCase, ) lowerCamelCase__ : Union[str, Any] =13 lowerCamelCase__ : Optional[Any] =floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) lowerCamelCase__ : Union[str, Any] =ids_tensor([batch_size, 4], model.config.text_config.vocab_size ) lowerCamelCase__ : str =random_attention_mask([batch_size, 4] ) lowerCamelCase__ : Optional[int] ={'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def snake_case ( self : List[str], lowerCamelCase : Any, lowerCamelCase : Dict )-> Dict: lowerCamelCase__ : str =FlaxCLIPVisionModel(lowerCamelCase ) lowerCamelCase__ : 
Optional[Any] =FlaxBertModel(lowerCamelCase ) return vision_model, text_model def snake_case ( self : Optional[int] )-> Optional[Any]: lowerCamelCase__ : List[Any] =FlaxCLIPVisionModelTester(self ) lowerCamelCase__ : List[Any] =FlaxBertModelTester(self ) lowerCamelCase__ : Any =clip_model_tester.prepare_config_and_inputs() lowerCamelCase__ : Optional[int] =bert_model_tester.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ : List[Any] =vision_config_and_inputs lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] =text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @slow def snake_case ( self : Tuple )-> Optional[Any]: lowerCamelCase__ : Any =FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''', logit_scale_init_value=1.0 ) lowerCamelCase__ : List[Any] =VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' ) lowerCamelCase__ : Optional[int] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCamelCase__ : Dict =processor( text=['''una foto di un gatto''', '''una foto di un cane'''], images=lowerCamelCase, padding=lowerCamelCase, return_tensors='''np''' ) lowerCamelCase__ : List[Any] =model(**lowerCamelCase ) # verify the logits self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), ) lowerCamelCase__ : Any =np.array([[1.2_284_727, 0.3_104_122]] ) self.assertTrue(np.allclose(outputs.logits_per_image, lowerCamelCase, atol=1E-3 ) )
625
"""simple docstring""" _lowercase : str = 0 # The first color of the flag. _lowercase : Dict = 1 # The second color of the flag. _lowercase : Tuple = 2 # The third color of the flag. _lowercase : Optional[int] = (red, white, blue) def snake_case__ ( __lowerCamelCase : list ): """simple docstring""" if not sequence: return [] if len(__lowerCamelCase ) == 1: return list(__lowerCamelCase ) lowerCamelCase__ : List[Any] =0 lowerCamelCase__ : Dict =len(__lowerCamelCase ) - 1 lowerCamelCase__ : Tuple =0 while mid <= high: if sequence[mid] == colors[0]: lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =sequence[mid], sequence[low] low += 1 mid += 1 elif sequence[mid] == colors[1]: mid += 1 elif sequence[mid] == colors[2]: lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =sequence[high], sequence[mid] high -= 1 else: lowerCamelCase__ : Dict =f'''The elements inside the sequence must contains only {colors} values''' raise ValueError(__lowerCamelCase ) return sequence if __name__ == "__main__": import doctest doctest.testmod() _lowercase : Optional[Any] = input("Enter numbers separated by commas:\n").strip() _lowercase : int = [int(item.strip()) for item in user_input.split(",")] print(f'{dutch_national_flag_sort(unsorted)}')
625
1
"""simple docstring""" import numpy as np import pandas as pd from sklearn.preprocessing import Normalizer from sklearn.svm import SVR from statsmodels.tsa.statespace.sarimax import SARIMAX def snake_case__ ( __lowerCamelCase : list , __lowerCamelCase : list , __lowerCamelCase : list , __lowerCamelCase : list , __lowerCamelCase : list ): """simple docstring""" lowerCamelCase__ : str =np.array([[1, item, train_mtch[i]] for i, item in enumerate(__lowerCamelCase )] ) lowerCamelCase__ : Any =np.array(__lowerCamelCase ) lowerCamelCase__ : Tuple =np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , __lowerCamelCase ) ) , x.transpose() ) , __lowerCamelCase ) return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] ) def snake_case__ ( __lowerCamelCase : list , __lowerCamelCase : list , __lowerCamelCase : list ): """simple docstring""" lowerCamelCase__ : Tuple =(1, 2, 1) lowerCamelCase__ : Any =(1, 1, 0, 7) lowerCamelCase__ : Optional[int] =SARIMAX( __lowerCamelCase , exog=__lowerCamelCase , order=__lowerCamelCase , seasonal_order=__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] =model.fit(disp=__lowerCamelCase , maxiter=600 , method='''nm''' ) lowerCamelCase__ : int =model_fit.predict(1 , len(__lowerCamelCase ) , exog=[test_match] ) return result[0] def snake_case__ ( __lowerCamelCase : list , __lowerCamelCase : list , __lowerCamelCase : list ): """simple docstring""" lowerCamelCase__ : Tuple =SVR(kernel='''rbf''' , C=1 , gamma=0.1 , epsilon=0.1 ) regressor.fit(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : Optional[Any] =regressor.predict(__lowerCamelCase ) return y_pred[0] def snake_case__ ( __lowerCamelCase : list ): """simple docstring""" train_user.sort() lowerCamelCase__ : List[str] =np.percentile(__lowerCamelCase , 25 ) lowerCamelCase__ : Optional[int] =np.percentile(__lowerCamelCase , 75 ) lowerCamelCase__ : int =qa - qa lowerCamelCase__ : Optional[int] =qa - (iqr * 0.1) return low_lim def snake_case__ ( __lowerCamelCase : list , 
__lowerCamelCase : float ): """simple docstring""" lowerCamelCase__ : Optional[Any] =0 lowerCamelCase__ : Any =0 for i in list_vote: if i > actual_result: lowerCamelCase__ : List[str] =not_safe + 1 else: if abs(abs(__lowerCamelCase ) - abs(__lowerCamelCase ) ) <= 0.1: safe += 1 else: not_safe += 1 return safe > not_safe if __name__ == "__main__": # data_input_df = pd.read_csv("ex_data.csv", header=None) _lowercase : Tuple = [[1_8_2_3_1, 0.0, 1], [2_2_6_2_1, 1.0, 2], [1_5_6_7_5, 0.0, 3], [2_3_5_8_3, 1.0, 4]] _lowercase : Any = pd.DataFrame( data_input, columns=["total_user", "total_even", "days"] ) _lowercase : int = Normalizer().fit_transform(data_input_df.values) # split data _lowercase : Optional[Any] = normalize_df[:, 2].tolist() _lowercase : Any = normalize_df[:, 0].tolist() _lowercase : Optional[int] = normalize_df[:, 1].tolist() # for svr (input variable = total date and total match) _lowercase : Optional[int] = normalize_df[:, [1, 2]].tolist() _lowercase : Optional[Any] = x[: len(x) - 1] _lowercase : str = x[len(x) - 1 :] # for linear regression & sarimax _lowercase : Tuple = total_date[: len(total_date) - 1] _lowercase : int = total_user[: len(total_user) - 1] _lowercase : Union[str, Any] = total_match[: len(total_match) - 1] _lowercase : int = total_date[len(total_date) - 1 :] _lowercase : Any = total_user[len(total_user) - 1 :] _lowercase : int = total_match[len(total_match) - 1 :] # voting system with forecasting _lowercase : Union[str, Any] = [ linear_regression_prediction( trn_date, trn_user, trn_match, tst_date, tst_match ), sarimax_predictor(trn_user, trn_match, tst_match), support_vector_regressor(x_train, x_test, trn_user), ] # check the safety of today's data _lowercase : Optional[Any] = "" if data_safety_checker(res_vote, tst_user) else "not " print("Today's data is {not_str}safe.")
625
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' _a = StableUnCLIPImgaImgPipeline _a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS _a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS _a = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess _a = frozenset([] ) def snake_case ( self : List[str] )-> str: lowerCamelCase__ : Dict =32 lowerCamelCase__ : Optional[Any] =embedder_hidden_size # image encoding components lowerCamelCase__ : Dict =CLIPImageProcessor(crop_size=32, size=32 ) torch.manual_seed(0 ) lowerCamelCase__ : List[Any] =CLIPVisionModelWithProjection( CLIPVisionConfig( hidden_size=lowerCamelCase, projection_dim=lowerCamelCase, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ) ) # regular denoising components torch.manual_seed(0 ) lowerCamelCase__ : Optional[int] 
=StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase ) lowerCamelCase__ : Dict =DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' ) torch.manual_seed(0 ) lowerCamelCase__ : Optional[int] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) torch.manual_seed(0 ) lowerCamelCase__ : Tuple =CLIPTextModel( CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=lowerCamelCase, projection_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) ) torch.manual_seed(0 ) lowerCamelCase__ : Dict =UNetaDConditionModel( sample_size=32, in_channels=4, out_channels=4, down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D'''), up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D'''), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type='''projection''', projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=lowerCamelCase, layers_per_block=1, upcast_attention=lowerCamelCase, use_linear_projection=lowerCamelCase, ) torch.manual_seed(0 ) lowerCamelCase__ : Union[str, Any] =DDIMScheduler( beta_schedule='''scaled_linear''', beta_start=0.00_085, beta_end=0.012, prediction_type='''v_prediction''', set_alpha_to_one=lowerCamelCase, steps_offset=1, ) torch.manual_seed(0 ) lowerCamelCase__ : Optional[int] =AutoencoderKL() lowerCamelCase__ : int ={ # image encoding components '''feature_extractor''': feature_extractor, '''image_encoder''': image_encoder.eval(), # image noising components '''image_normalizer''': image_normalizer.eval(), '''image_noising_scheduler''': image_noising_scheduler, # regular denoising components '''tokenizer''': tokenizer, '''text_encoder''': text_encoder.eval(), '''unet''': unet.eval(), '''scheduler''': scheduler, '''vae''': vae.eval(), } return components def snake_case ( self : str, lowerCamelCase : Dict, lowerCamelCase : Any=0, lowerCamelCase : str=True )-> List[str]: if str(lowerCamelCase 
).startswith('''mps''' ): lowerCamelCase__ : List[Any] =torch.manual_seed(lowerCamelCase ) else: lowerCamelCase__ : Any =torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase ) lowerCamelCase__ : Dict =floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase ) if pil_image: lowerCamelCase__ : int =input_image * 0.5 + 0.5 lowerCamelCase__ : Dict =input_image.clamp(0, 1 ) lowerCamelCase__ : List[str] =input_image.cpu().permute(0, 2, 3, 1 ).float().numpy() lowerCamelCase__ : Dict =DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0] return { "prompt": "An anime racoon running a marathon", "image": input_image, "generator": generator, "num_inference_steps": 2, "output_type": "np", } @skip_mps def snake_case ( self : List[str] )-> Optional[Any]: lowerCamelCase__ : Dict ='''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCamelCase__ : str =self.get_dummy_components() lowerCamelCase__ : int =StableUnCLIPImgaImgPipeline(**lowerCamelCase ) lowerCamelCase__ : Any =sd_pipe.to(lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase ) lowerCamelCase__ : Dict =self.get_dummy_inputs(lowerCamelCase ) inputs.update({'''image_embeds''': None} ) lowerCamelCase__ : Any =sd_pipe(**lowerCamelCase ).images lowerCamelCase__ : List[Any] =image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCamelCase__ : Union[str, Any] =np.array([0.3_872, 0.7_224, 0.5_601, 0.4_741, 0.6_872, 0.5_814, 0.4_636, 0.3_867, 0.5_078] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def snake_case ( self : int )-> Tuple: lowerCamelCase__ : Tuple =torch_device in ['''cpu''', '''mps'''] self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase ) def snake_case ( self : int )-> Optional[Any]: lowerCamelCase__ : List[Any] =torch_device in ['''cpu''', '''mps'''] self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase ) @unittest.skipIf( torch_device != '''cuda''' or 
not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', ) def snake_case ( self : List[str] )-> List[str]: self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase ) @slow @require_torch_gpu class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def snake_case ( self : List[Any] )-> Dict: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case ( self : Optional[int] )-> int: lowerCamelCase__ : Tuple =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' ) lowerCamelCase__ : Optional[int] =load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''' ) lowerCamelCase__ : Optional[Any] =StableUnCLIPImgaImgPipeline.from_pretrained( '''fusing/stable-unclip-2-1-l-img2img''', torch_dtype=torch.floataa ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowerCamelCase__ : int =torch.Generator(device='''cpu''' ).manual_seed(0 ) lowerCamelCase__ : Any =pipe(lowerCamelCase, '''anime turle''', generator=lowerCamelCase, output_type='''np''' ) lowerCamelCase__ : List[Any] =output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase ) def snake_case ( self : Optional[int] )-> Tuple: lowerCamelCase__ : Any =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' ) lowerCamelCase__ : str =load_numpy( 
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''' ) lowerCamelCase__ : Optional[int] =StableUnCLIPImgaImgPipeline.from_pretrained( '''fusing/stable-unclip-2-1-h-img2img''', torch_dtype=torch.floataa ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowerCamelCase__ : str =torch.Generator(device='''cpu''' ).manual_seed(0 ) lowerCamelCase__ : Tuple =pipe(lowerCamelCase, '''anime turle''', generator=lowerCamelCase, output_type='''np''' ) lowerCamelCase__ : Tuple =output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase ) def snake_case ( self : Optional[int] )-> List[str]: lowerCamelCase__ : int =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' ) torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowerCamelCase__ : Any =StableUnCLIPImgaImgPipeline.from_pretrained( '''fusing/stable-unclip-2-1-h-img2img''', torch_dtype=torch.floataa ) lowerCamelCase__ : Optional[Any] =pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowerCamelCase__ : List[Any] =pipe( lowerCamelCase, '''anime turtle''', num_inference_steps=2, output_type='''np''', ) lowerCamelCase__ : Optional[int] =torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
625
1
"""simple docstring""" import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def snake_case ( self : int )-> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def snake_case ( self : Tuple )-> str: lowerCamelCase__ : str =1 lowerCamelCase__ : Any =3 lowerCamelCase__ : Optional[Any] =(32, 32) lowerCamelCase__ : Tuple =floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0 ) ).to(lowerCamelCase ) return image @property def snake_case ( self : str )-> List[Any]: torch.manual_seed(0 ) lowerCamelCase__ : Union[str, Any] =UNetaDConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=32, ) return model @property def snake_case ( self : Dict )-> List[str]: torch.manual_seed(0 ) lowerCamelCase__ : Dict =AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, ) return model @property def snake_case ( self : int )-> str: torch.manual_seed(0 ) lowerCamelCase__ : List[Any] =CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, 
pad_token_id=1, vocab_size=1000, ) return CLIPTextModel(lowerCamelCase ) @property def snake_case ( self : Optional[int] )-> int: def extract(*lowerCamelCase : List[Any], **lowerCamelCase : str ): class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : int )-> Any: lowerCamelCase__ : Optional[int] =torch.ones([0] ) def snake_case ( self : Optional[int], lowerCamelCase : List[str] )-> int: self.pixel_values.to(lowerCamelCase ) return self return Out() return extract def snake_case ( self : List[Any] )-> Dict: lowerCamelCase__ : List[Any] ='''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCamelCase__ : Union[str, Any] =self.dummy_cond_unet lowerCamelCase__ : str =DDIMScheduler( beta_start=0.00_085, beta_end=0.012, beta_schedule='''scaled_linear''', clip_sample=lowerCamelCase, set_alpha_to_one=lowerCamelCase, ) lowerCamelCase__ : Any =self.dummy_vae lowerCamelCase__ : Union[str, Any] =self.dummy_text_encoder lowerCamelCase__ : Any =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) # make sure here that pndm scheduler skips prk lowerCamelCase__ : str =StableDiffusionPipeline( unet=lowerCamelCase, scheduler=lowerCamelCase, vae=lowerCamelCase, text_encoder=lowerCamelCase, tokenizer=lowerCamelCase, safety_checker=lowerCamelCase, feature_extractor=self.dummy_extractor, ) lowerCamelCase__ : Union[str, Any] =sd_pipe.to(lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase ) lowerCamelCase__ : int ='''A painting of a squirrel eating a burger''' lowerCamelCase__ : str =torch.Generator(device=lowerCamelCase ).manual_seed(0 ) lowerCamelCase__ : int =sd_pipe([prompt], generator=lowerCamelCase, guidance_scale=6.0, num_inference_steps=2, output_type='''np''' ) lowerCamelCase__ : Optional[Any] =output.images lowerCamelCase__ : List[Any] =torch.Generator(device=lowerCamelCase ).manual_seed(0 ) lowerCamelCase__ : Tuple =sd_pipe( [prompt], generator=lowerCamelCase, guidance_scale=6.0, 
num_inference_steps=2, output_type='''np''', return_dict=lowerCamelCase, )[0] lowerCamelCase__ : Optional[int] =image[0, -3:, -3:, -1] lowerCamelCase__ : Any =image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCamelCase__ : Any =np.array([0.5_756, 0.6_118, 0.5_005, 0.5_041, 0.5_471, 0.4_726, 0.4_976, 0.4_865, 0.4_864] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def snake_case ( self : Union[str, Any] )-> Union[str, Any]: lowerCamelCase__ : Dict ='''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCamelCase__ : Optional[int] =self.dummy_cond_unet lowerCamelCase__ : Any =PNDMScheduler(skip_prk_steps=lowerCamelCase ) lowerCamelCase__ : Optional[int] =self.dummy_vae lowerCamelCase__ : int =self.dummy_text_encoder lowerCamelCase__ : Dict =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) # make sure here that pndm scheduler skips prk lowerCamelCase__ : List[str] =StableDiffusionPipeline( unet=lowerCamelCase, scheduler=lowerCamelCase, vae=lowerCamelCase, text_encoder=lowerCamelCase, tokenizer=lowerCamelCase, safety_checker=lowerCamelCase, feature_extractor=self.dummy_extractor, ) lowerCamelCase__ : List[str] =sd_pipe.to(lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase ) lowerCamelCase__ : Tuple ='''A painting of a squirrel eating a burger''' lowerCamelCase__ : Union[str, Any] =torch.Generator(device=lowerCamelCase ).manual_seed(0 ) lowerCamelCase__ : Optional[int] =sd_pipe([prompt], generator=lowerCamelCase, guidance_scale=6.0, num_inference_steps=2, output_type='''np''' ) lowerCamelCase__ : List[Any] =output.images lowerCamelCase__ : Tuple =torch.Generator(device=lowerCamelCase ).manual_seed(0 ) lowerCamelCase__ : List[str] =sd_pipe( [prompt], generator=lowerCamelCase, guidance_scale=6.0, num_inference_steps=2, output_type='''np''', return_dict=lowerCamelCase, )[0] 
lowerCamelCase__ : int =image[0, -3:, -3:, -1] lowerCamelCase__ : Optional[int] =image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCamelCase__ : int =np.array([0.5_125, 0.5_716, 0.4_828, 0.5_060, 0.5_650, 0.4_768, 0.5_185, 0.4_895, 0.4_993] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def snake_case ( self : Any )-> Tuple: lowerCamelCase__ : List[str] =StableDiffusionPipeline.from_pretrained( '''hf-internal-testing/tiny-stable-diffusion-lms-pipe''', safety_checker=lowerCamelCase ) assert isinstance(lowerCamelCase, lowerCamelCase ) assert isinstance(pipe.scheduler, lowerCamelCase ) assert pipe.safety_checker is None lowerCamelCase__ : Any =pipe('''example prompt''', num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowerCamelCase ) lowerCamelCase__ : List[str] =StableDiffusionPipeline.from_pretrained(lowerCamelCase ) # sanity check that the pipeline still works assert pipe.safety_checker is None lowerCamelCase__ : List[str] =pipe('''example prompt''', num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != '''cuda''', '''This test requires a GPU''' ) def snake_case ( self : int )-> Union[str, Any]: lowerCamelCase__ : List[Any] =self.dummy_cond_unet lowerCamelCase__ : Optional[Any] =PNDMScheduler(skip_prk_steps=lowerCamelCase ) lowerCamelCase__ : Union[str, Any] =self.dummy_vae lowerCamelCase__ : Any =self.dummy_text_encoder lowerCamelCase__ : Union[str, Any] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) # put models in fp16 lowerCamelCase__ : List[Any] =unet.half() lowerCamelCase__ : Optional[Any] =vae.half() lowerCamelCase__ : Any =bert.half() # make sure here that pndm scheduler skips prk lowerCamelCase__ : 
Tuple =StableDiffusionPipeline( unet=lowerCamelCase, scheduler=lowerCamelCase, vae=lowerCamelCase, text_encoder=lowerCamelCase, tokenizer=lowerCamelCase, safety_checker=lowerCamelCase, feature_extractor=self.dummy_extractor, ) lowerCamelCase__ : Optional[Any] =sd_pipe.to(lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase ) lowerCamelCase__ : Optional[int] ='''A painting of a squirrel eating a burger''' lowerCamelCase__ : int =sd_pipe([prompt], num_inference_steps=2, output_type='''np''' ).images assert image.shape == (1, 64, 64, 3) @nightly @require_torch_gpu class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def snake_case ( self : Any )-> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case ( self : Any )-> Any: lowerCamelCase__ : int =StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''', safety_checker=lowerCamelCase ) lowerCamelCase__ : List[Any] =LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) lowerCamelCase__ : str =sd_pipe.to(lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase ) lowerCamelCase__ : List[str] =( '''portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle''' ''' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with''' ''' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and''' ''' children from bahnhof zoo, detailed ''' ) lowerCamelCase__ : Optional[Any] =40_0366_0346 lowerCamelCase__ : str =7 # without safety guidance (sld_guidance_scale = 0) lowerCamelCase__ : Tuple =torch.manual_seed(lowerCamelCase ) lowerCamelCase__ : Any =sd_pipe( [prompt], generator=lowerCamelCase, guidance_scale=lowerCamelCase, num_inference_steps=50, output_type='''np''', width=512, height=512, sld_guidance_scale=0, ) lowerCamelCase__ : Optional[int] =output.images 
lowerCamelCase__ : Union[str, Any] =image[0, -3:, -3:, -1] lowerCamelCase__ : List[Any] =[0.2_278, 0.2_231, 0.2_249, 0.2_333, 0.2_303, 0.1_885, 0.2_273, 0.2_144, 0.2_176] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 # without safety guidance (strong configuration) lowerCamelCase__ : int =torch.manual_seed(lowerCamelCase ) lowerCamelCase__ : int =sd_pipe( [prompt], generator=lowerCamelCase, guidance_scale=lowerCamelCase, num_inference_steps=50, output_type='''np''', width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, ) lowerCamelCase__ : str =output.images lowerCamelCase__ : List[str] =image[0, -3:, -3:, -1] lowerCamelCase__ : str =[0.2_383, 0.2_276, 0.236, 0.2_192, 0.2_186, 0.2_053, 0.1_971, 0.1_901, 0.1_719] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def snake_case ( self : int )-> str: lowerCamelCase__ : str =StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''', safety_checker=lowerCamelCase ) lowerCamelCase__ : List[Any] =LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) lowerCamelCase__ : List[str] =sd_pipe.to(lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase ) lowerCamelCase__ : int ='''padme amidala taking a bath artwork, safe for work, no nudity''' lowerCamelCase__ : int =27_3497_1755 lowerCamelCase__ : Dict =7 lowerCamelCase__ : Dict =torch.manual_seed(lowerCamelCase ) lowerCamelCase__ : List[str] =sd_pipe( [prompt], generator=lowerCamelCase, guidance_scale=lowerCamelCase, num_inference_steps=50, output_type='''np''', width=512, height=512, sld_guidance_scale=0, ) lowerCamelCase__ : Union[str, Any] =output.images lowerCamelCase__ : int =image[0, -3:, -3:, -1] lowerCamelCase__ : Optional[Any] =[0.3_502, 0.3_622, 0.3_396, 0.3_642, 0.3_478, 0.3_318, 0.35, 0.3_348, 0.3_297] assert image.shape == (1, 512, 
512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 lowerCamelCase__ : Dict =torch.manual_seed(lowerCamelCase ) lowerCamelCase__ : int =sd_pipe( [prompt], generator=lowerCamelCase, guidance_scale=lowerCamelCase, num_inference_steps=50, output_type='''np''', width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, ) lowerCamelCase__ : Any =output.images lowerCamelCase__ : Dict =image[0, -3:, -3:, -1] lowerCamelCase__ : Tuple =[0.5_531, 0.5_206, 0.4_895, 0.5_156, 0.5_182, 0.4_751, 0.4_802, 0.4_803, 0.4_443] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def snake_case ( self : List[Any] )-> Any: lowerCamelCase__ : Optional[Any] =StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' ) lowerCamelCase__ : Optional[Any] =sd_pipe.to(lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase ) lowerCamelCase__ : int =( '''the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. 
c.''' ''' leyendecker''' ) lowerCamelCase__ : Optional[int] =10_4435_5234 lowerCamelCase__ : Optional[int] =12 lowerCamelCase__ : Dict =torch.manual_seed(lowerCamelCase ) lowerCamelCase__ : Optional[int] =sd_pipe( [prompt], generator=lowerCamelCase, guidance_scale=lowerCamelCase, num_inference_steps=50, output_type='''np''', width=512, height=512, sld_guidance_scale=0, ) lowerCamelCase__ : Any =output.images lowerCamelCase__ : List[str] =image[0, -3:, -3:, -1] lowerCamelCase__ : Optional[Any] =np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7 lowerCamelCase__ : Optional[int] =torch.manual_seed(lowerCamelCase ) lowerCamelCase__ : str =sd_pipe( [prompt], generator=lowerCamelCase, guidance_scale=lowerCamelCase, num_inference_steps=50, output_type='''np''', width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, ) lowerCamelCase__ : List[str] =output.images lowerCamelCase__ : Optional[Any] =image[0, -3:, -3:, -1] lowerCamelCase__ : List[Any] =np.array([0.5_818, 0.6_285, 0.6_835, 0.6_019, 0.625, 0.6_754, 0.6_096, 0.6_334, 0.6_561] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
625
"""simple docstring""" def snake_case__ ( __lowerCamelCase : int = 4000000 ): """simple docstring""" lowerCamelCase__ : Dict =[] lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =0, 1 while b <= n: if b % 2 == 0: even_fibs.append(__lowerCamelCase ) lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =b, a + b return sum(__lowerCamelCase ) if __name__ == "__main__": print(f'{solution() = }')
625
1
"""simple docstring""" import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate _lowercase : Any = trt.Logger(trt.Logger.WARNING) _lowercase : Optional[int] = absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) _lowercase : List[str] = logging.getLogger(__name__) _lowercase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--onnx_model_path", default=None, type=str, required=True, help="Path to ONNX model: ", ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model checkpoints and predictions will be written.", ) # Other parameters parser.add_argument( "--tokenizer_name", default="", type=str, required=True, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--version_2_with_negative", action="store_true", help="If true, the SQuAD examples contain some that do not have an answer.", ) parser.add_argument( "--null_score_diff_threshold", type=float, default=0.0, help="If null_score - best_non_null is greater than the threshold predict null.", ) parser.add_argument( "--max_seq_length", default=3_8_4, type=int, help=( "The maximum total input sequence length after WordPiece tokenization. Sequences " "longer than this will be truncated, and sequences shorter than this will be padded." 
), ) parser.add_argument( "--doc_stride", default=1_2_8, type=int, help="When splitting up a long document into chunks, how much stride to take between chunks.", ) parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.") parser.add_argument( "--n_best_size", default=2_0, type=int, help="The total number of n-best predictions to generate in the nbest_predictions.json output file.", ) parser.add_argument( "--max_answer_length", default=3_0, type=int, help=( "The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another." ), ) parser.add_argument("--seed", type=int, default=4_2, help="random seed for initialization") parser.add_argument( "--dataset_name", type=str, default=None, required=True, help="The name of the dataset to use (via the datasets library).", ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The configuration name of the dataset to use (via the datasets library).", ) parser.add_argument( "--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data." ) parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets") parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision instead of 32-bit", ) parser.add_argument( "--int8", action="store_true", help="Whether to use INT8", ) _lowercase : Optional[int] = parser.parse_args() if args.tokenizer_name: _lowercase : Optional[Any] = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." 
) logger.info("Training/evaluation parameters %s", args) _lowercase : List[Any] = args.per_device_eval_batch_size _lowercase : List[Any] = (args.eval_batch_size, args.max_seq_length) # TRT Engine properties _lowercase : str = True _lowercase : int = "temp_engine/bert-fp32.engine" if args.fpaa: _lowercase : Optional[int] = "temp_engine/bert-fp16.engine" if args.inta: _lowercase : List[str] = "temp_engine/bert-int8.engine" # import ONNX file if not os.path.exists("temp_engine"): os.makedirs("temp_engine") _lowercase : Optional[int] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER ) as parser: with open(args.onnx_model_path, "rb") as model: if not parser.parse(model.read()): for error in range(parser.num_errors): print(parser.get_error(error)) # Query input names and shapes from parsed TensorRT network _lowercase : Union[str, Any] = [network.get_input(i) for i in range(network.num_inputs)] _lowercase : str = [_input.name for _input in network_inputs] # ex: ["actual_input1"] with builder.create_builder_config() as config: _lowercase : Union[str, Any] = 1 << 5_0 if STRICT_TYPES: config.set_flag(trt.BuilderFlag.STRICT_TYPES) if args.fpaa: config.set_flag(trt.BuilderFlag.FPaa) if args.inta: config.set_flag(trt.BuilderFlag.INTa) _lowercase : List[Any] = builder.create_optimization_profile() config.add_optimization_profile(profile) for i in range(len(input_names)): profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) _lowercase : List[str] = builder.build_engine(network, config) # serialize_engine and store in file (can be directly loaded and deserialized): with open(engine_name, "wb") as f: f.write(engine.serialize()) def snake_case__ ( __lowerCamelCase : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , 
__lowerCamelCase : Tuple , __lowerCamelCase : Optional[int] ): """simple docstring""" lowerCamelCase__ : Union[str, Any] =np.asarray(inputs['''input_ids'''] , dtype=np.intaa ) lowerCamelCase__ : str =np.asarray(inputs['''attention_mask'''] , dtype=np.intaa ) lowerCamelCase__ : Dict =np.asarray(inputs['''token_type_ids'''] , dtype=np.intaa ) # Copy inputs cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , __lowerCamelCase ) cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , __lowerCamelCase ) cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , __lowerCamelCase ) # start time lowerCamelCase__ : Optional[int] =time.time() # Run inference context.execute_async( bindings=[int(__lowerCamelCase ) for d_inp in d_inputs] + [int(__lowerCamelCase ), int(__lowerCamelCase )] , stream_handle=stream.handle ) # Transfer predictions back from GPU cuda.memcpy_dtoh_async(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) cuda.memcpy_dtoh_async(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # Synchronize the stream and take time stream.synchronize() # end time lowerCamelCase__ : List[Any] =time.time() lowerCamelCase__ : Tuple =end_time - start_time lowerCamelCase__ : Optional[Any] =(h_outputa, h_outputa) # print(outputs) return outputs, infer_time # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. _lowercase : List[Any] = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. 
# Only the main process per machine logs at INFO; every other rank is
# restricted to errors so the output is not duplicated across processes.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
    datasets.utils.logging.set_verbosity_warning()
    transformers.utils.logging.set_verbosity_info()
else:
    datasets.utils.logging.set_verbosity_error()
    transformers.utils.logging.set_verbosity_error()

# If passed along, set the training seed now.
if args.seed is not None:
    set_seed(args.seed)

# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
    # Downloading and loading a dataset from the hub.
    # NOTE: bound to `raw_datasets` because it is read as such below — the
    # obfuscated original assigned it to a throwaway name and then hit a
    # NameError on the `column_names` lookup.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
    raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.

# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets["validation"].column_names

# Fall back to positional columns when the SQuAD-style names are absent.
question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
_lowercase : Union[str, Any] = tokenizer.padding_side == "right" if args.max_seq_length > tokenizer.model_max_length: logger.warning( f'The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the' f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' ) _lowercase : Any = min(args.max_seq_length, tokenizer.model_max_length) def snake_case__ ( __lowerCamelCase : Dict ): """simple docstring""" # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take a lots of space). So we remove that # left whitespace lowerCamelCase__ : str =[q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. lowerCamelCase__ : Any =tokenizer( examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='''only_second''' if pad_on_right else '''only_first''' , max_length=__lowerCamelCase , stride=args.doc_stride , return_overflowing_tokens=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , padding='''max_length''' , ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. lowerCamelCase__ : List[Any] =tokenized_examples.pop('''overflow_to_sample_mapping''' ) # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. 
lowerCamelCase__ : Union[str, Any] =[] for i in range(len(tokenized_examples['''input_ids'''] ) ): # Grab the sequence corresponding to that example (to know what is the context and what is the question). lowerCamelCase__ : Dict =tokenized_examples.sequence_ids(__lowerCamelCase ) lowerCamelCase__ : List[Any] =1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. lowerCamelCase__ : str =sample_mapping[i] tokenized_examples["example_id"].append(examples['''id'''][sample_index] ) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. lowerCamelCase__ : List[Any] =[ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples['''offset_mapping'''][i] ) ] return tokenized_examples _lowercase : List[str] = raw_datasets["validation"] # Validation Feature Creation _lowercase : List[str] = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc="Running tokenizer on validation dataset", ) _lowercase : Optional[Any] = default_data_collator _lowercase : Union[str, Any] = eval_dataset.remove_columns(["example_id", "offset_mapping"]) _lowercase : int = DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : str , __lowerCamelCase : Any="eval" ): """simple docstring""" # Post-processing: we match the start logits and end logits to answers in the original context. 
lowerCamelCase__ : List[Any] =postprocess_qa_predictions( examples=__lowerCamelCase , features=__lowerCamelCase , predictions=__lowerCamelCase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=__lowerCamelCase , ) # Format the result to the format the metric expects. if args.version_2_with_negative: lowerCamelCase__ : int =[ {'''id''': k, '''prediction_text''': v, '''no_answer_probability''': 0.0} for k, v in predictions.items() ] else: lowerCamelCase__ : Optional[int] =[{'''id''': k, '''prediction_text''': v} for k, v in predictions.items()] lowerCamelCase__ : Tuple =[{'''id''': ex['''id'''], '''answers''': ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=__lowerCamelCase , label_ids=__lowerCamelCase ) _lowercase : int = load_metric("squad_v2" if args.version_2_with_negative else "squad") # Evaluation! logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path) with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine( f.read() ) as engine, engine.create_execution_context() as context: # setup for TRT inferrence for i in range(len(input_names)): context.set_binding_shape(i, INPUT_SHAPE) assert context.all_binding_shapes_specified def snake_case__ ( __lowerCamelCase : Tuple ): """simple docstring""" return trt.volume(engine.get_binding_shape(__lowerCamelCase ) ) * engine.get_binding_dtype(__lowerCamelCase ).itemsize # Allocate device memory for inputs and outputs. 
_lowercase : Optional[Any] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)] # Allocate output buffer _lowercase : Any = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa) _lowercase : List[Any] = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa) _lowercase : int = cuda.mem_alloc(h_outputa.nbytes) _lowercase : Optional[int] = cuda.mem_alloc(h_outputa.nbytes) # Create a stream in which to copy inputs/outputs and run inference. _lowercase : List[str] = cuda.Stream() # Evaluation logger.info("***** Running Evaluation *****") logger.info(f' Num examples = {len(eval_dataset)}') logger.info(f' Batch size = {args.per_device_eval_batch_size}') _lowercase : str = 0.0 _lowercase : Tuple = 0 _lowercase : Optional[Any] = timeit.default_timer() _lowercase : Optional[Any] = None for step, batch in enumerate(eval_dataloader): _lowercase , _lowercase : List[str] = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream) total_time += infer_time niter += 1 _lowercase , _lowercase : Any = outputs _lowercase : str = torch.tensor(start_logits) _lowercase : Dict = torch.tensor(end_logits) # necessary to pad predictions and labels for being gathered _lowercase : Union[str, Any] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_0_0) _lowercase : int = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_0_0) _lowercase : List[Any] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy()) _lowercase : Tuple = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_0_0) if all_preds is not None: _lowercase : Optional[int] = nested_truncate(all_preds, len(eval_dataset)) _lowercase : List[Any] = timeit.default_timer() - start_time logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset)) # Inference time from 
TRT logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1_0_0_0 / niter)) logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1_0_0_0)) logger.info("Total Number of Inference = %d", niter) _lowercase : List[str] = post_processing_function(eval_examples, eval_dataset, all_preds) _lowercase : Dict = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(f'Evaluation metrics: {eval_metric}')
625
"""simple docstring""" from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class __SCREAMING_SNAKE_CASE : '''simple docstring''' _a = BlenderbotSmallConfig _a = {} _a = 'gelu' def __init__( self : Union[str, Any], lowerCamelCase : List[str], lowerCamelCase : Dict=13, lowerCamelCase : Optional[Any]=7, lowerCamelCase : Optional[int]=True, lowerCamelCase : int=False, lowerCamelCase : Union[str, Any]=99, lowerCamelCase : str=32, lowerCamelCase : List[Any]=2, lowerCamelCase : Optional[int]=4, lowerCamelCase : Union[str, Any]=37, lowerCamelCase : str=0.1, lowerCamelCase : Optional[int]=0.1, lowerCamelCase : Optional[Any]=20, lowerCamelCase : int=2, lowerCamelCase : Any=1, lowerCamelCase : Optional[Any]=0, )-> List[str]: lowerCamelCase__ : Any =parent lowerCamelCase__ : Dict =batch_size lowerCamelCase__ : Optional[int] =seq_length lowerCamelCase__ : Tuple =is_training lowerCamelCase__ : Dict =use_labels lowerCamelCase__ : List[Any] =vocab_size lowerCamelCase__ : str =hidden_size lowerCamelCase__ : str =num_hidden_layers lowerCamelCase__ : Union[str, Any] =num_attention_heads lowerCamelCase__ : Any =intermediate_size lowerCamelCase__ : Dict =hidden_dropout_prob lowerCamelCase__ : List[Any] =attention_probs_dropout_prob lowerCamelCase__ : str =max_position_embeddings lowerCamelCase__ : Optional[int] =eos_token_id lowerCamelCase__ : str =pad_token_id lowerCamelCase__ : Union[str, Any] =bos_token_id def snake_case ( self 
: Any )-> Any: lowerCamelCase__ : Any =ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size ) lowerCamelCase__ : Tuple =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 ) lowerCamelCase__ : Any =tf.concat([input_ids, eos_tensor], axis=1 ) lowerCamelCase__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) lowerCamelCase__ : int =self.config_cls( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, ) lowerCamelCase__ : Optional[int] =prepare_blenderbot_small_inputs_dict(lowerCamelCase, lowerCamelCase, lowerCamelCase ) return config, inputs_dict def snake_case ( self : Any, lowerCamelCase : str, lowerCamelCase : Any )-> Optional[Any]: lowerCamelCase__ : Union[str, Any] =TFBlenderbotSmallModel(config=lowerCamelCase ).get_decoder() lowerCamelCase__ : List[Any] =inputs_dict['''input_ids'''] lowerCamelCase__ : Optional[int] =input_ids[:1, :] lowerCamelCase__ : str =inputs_dict['''attention_mask'''][:1, :] lowerCamelCase__ : Union[str, Any] =inputs_dict['''head_mask'''] lowerCamelCase__ : Optional[Any] =1 # first forward pass lowerCamelCase__ : Dict =model(lowerCamelCase, attention_mask=lowerCamelCase, head_mask=lowerCamelCase, use_cache=lowerCamelCase ) lowerCamelCase__ , lowerCamelCase__ : List[str] =outputs.to_tuple() # create hypothetical next token and extent to next_input_ids lowerCamelCase__ : Union[str, Any] =ids_tensor((self.batch_size, 3), config.vocab_size ) 
lowerCamelCase__ : Tuple =tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta ) # append to next input_ids and lowerCamelCase__ : List[str] =tf.concat([input_ids, next_tokens], axis=-1 ) lowerCamelCase__ : str =tf.concat([attention_mask, next_attn_mask], axis=-1 ) lowerCamelCase__ : Optional[int] =model(lowerCamelCase, attention_mask=lowerCamelCase )[0] lowerCamelCase__ : Union[str, Any] =model(lowerCamelCase, attention_mask=lowerCamelCase, past_key_values=lowerCamelCase )[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] ) # select random slice lowerCamelCase__ : Tuple =int(ids_tensor((1,), output_from_past.shape[-1] ) ) lowerCamelCase__ : int =output_from_no_past[:, -3:, random_slice_idx] lowerCamelCase__ : List[str] =output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowerCamelCase, lowerCamelCase, rtol=1E-3 ) def snake_case__ ( __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Tuple=None , __lowerCamelCase : Dict=None , __lowerCamelCase : Optional[int]=None , ): """simple docstring""" if attention_mask is None: lowerCamelCase__ : List[str] =tf.cast(tf.math.not_equal(__lowerCamelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: lowerCamelCase__ : str =tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: lowerCamelCase__ : int =tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowerCamelCase__ : int =tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: lowerCamelCase__ : List[str] =tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, 
"decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' _a = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) _a = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () _a = ( { 'conversational': TFBlenderbotSmallForConditionalGeneration, 'feature-extraction': TFBlenderbotSmallModel, 'summarization': TFBlenderbotSmallForConditionalGeneration, 'text2text-generation': TFBlenderbotSmallForConditionalGeneration, 'translation': TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) _a = True _a = False _a = False def snake_case ( self : Any )-> str: lowerCamelCase__ : Tuple =TFBlenderbotSmallModelTester(self ) lowerCamelCase__ : Union[str, Any] =ConfigTester(self, config_class=lowerCamelCase ) def snake_case ( self : Any )-> Optional[int]: self.config_tester.run_common_tests() def snake_case ( self : int )-> str: lowerCamelCase__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowerCamelCase ) @require_tokenizers @require_tf class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' _a = [ 'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like ' ' i\'m going to throw up.\nand why is that?' 
] _a = 'facebook/blenderbot_small-90M' @cached_property def snake_case ( self : Any )-> List[Any]: # use "old" tokenizer here because of bug when downloading new tokenizer return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) @cached_property def snake_case ( self : int )-> List[Any]: lowerCamelCase__ : str =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def snake_case ( self : Tuple )-> int: lowerCamelCase__ : Dict =self.tokenizer(self.src_text, return_tensors='''tf''' ) lowerCamelCase__ : Any =self.model.generate( model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=lowerCamelCase, ) lowerCamelCase__ : Any =self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=lowerCamelCase )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
625
1
"""simple docstring""" import argparse import logging import sys from unittest.mock import patch import run_glue_deebert from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow logging.basicConfig(level=logging.DEBUG) _lowercase : Any = logging.getLogger() def snake_case__ ( ): """simple docstring""" lowerCamelCase__ : Any =argparse.ArgumentParser() parser.add_argument('''-f''' ) lowerCamelCase__ : Dict =parser.parse_args() return args.f class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' def snake_case ( self : Dict )-> None: lowerCamelCase__ : Dict =logging.StreamHandler(sys.stdout ) logger.addHandler(lowerCamelCase ) def snake_case ( self : Any, lowerCamelCase : Optional[int] )-> Any: lowerCamelCase__ : List[Any] =get_gpu_count() if n_gpu > 1: pass # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560 # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py" # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split() # cmd = [sys.executable] + distributed_args + args # execute_subprocess_async(cmd, env=self.get_env()) # XXX: test the results - need to save them first into .json file else: args.insert(0, '''run_glue_deebert.py''' ) with patch.object(lowerCamelCase, '''argv''', lowerCamelCase ): lowerCamelCase__ : Tuple =run_glue_deebert.main() for value in result.values(): self.assertGreaterEqual(lowerCamelCase, 0.666 ) @slow @require_torch_non_multi_gpu def snake_case ( self : Tuple )-> int: lowerCamelCase__ : List[Any] =''' --model_type roberta --model_name_or_path roberta-base --task_name MRPC --do_train --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --max_seq_length 128 --per_gpu_eval_batch_size=1 --per_gpu_train_batch_size=8 --learning_rate 2e-4 --num_train_epochs 3 --overwrite_output_dir --seed 42 --output_dir 
./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --save_steps 0 --overwrite_cache --eval_after_first_stage '''.split() self.run_and_check(lowerCamelCase ) lowerCamelCase__ : Any =''' --model_type roberta --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --task_name MRPC --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --max_seq_length 128 --eval_each_highway --eval_highway --overwrite_cache --per_gpu_eval_batch_size=1 '''.split() self.run_and_check(lowerCamelCase ) lowerCamelCase__ : Union[str, Any] =''' --model_type roberta --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --task_name MRPC --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --max_seq_length 128 --early_exit_entropy 0.1 --eval_highway --overwrite_cache --per_gpu_eval_batch_size=1 '''.split() self.run_and_check(lowerCamelCase )
625
"""simple docstring""" def snake_case__ ( __lowerCamelCase : list[list[int]] , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : list[int] ): """simple docstring""" # 1. Validate that path exists between current and next vertices if graph[path[curr_ind - 1]][next_ver] == 0: return False # 2. Validate that next vertex is not already in path return not any(vertex == next_ver for vertex in path ) def snake_case__ ( __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[int] , __lowerCamelCase : int ): """simple docstring""" # Base Case if curr_ind == len(__lowerCamelCase ): # return whether path exists between current and starting vertices return graph[path[curr_ind - 1]][path[0]] == 1 # Recursive Step for next_ver in range(0 , len(__lowerCamelCase ) ): if valid_connection(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ): # Insert current vertex into path as next transition lowerCamelCase__ : Tuple =next_ver # Validate created path if util_hamilton_cycle(__lowerCamelCase , __lowerCamelCase , curr_ind + 1 ): return True # Backtrack lowerCamelCase__ : int =-1 return False def snake_case__ ( __lowerCamelCase : list[list[int]] , __lowerCamelCase : int = 0 ): """simple docstring""" lowerCamelCase__ : Tuple =[-1] * (len(__lowerCamelCase ) + 1) # initialize start and end of path with starting index lowerCamelCase__ : Union[str, Any] =start_index # evaluate and if we find answer return path either return empty array return path if util_hamilton_cycle(__lowerCamelCase , __lowerCamelCase , 1 ) else []
625
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, is_vision_available, ) _lowercase : List[Any] = {"processing_layoutxlm": ["LayoutXLMProcessor"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Tuple = ["LayoutXLMTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Any = ["LayoutXLMTokenizerFast"] if TYPE_CHECKING: from .processing_layoutxlm import LayoutXLMProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm import LayoutXLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast else: import sys _lowercase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
625
"""simple docstring""" import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin _lowercase : List[str] = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right _lowercase : List[str] = 2_5_0_0_0_4 _lowercase : Optional[Any] = 2_5_0_0_2_0 @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' _a = MBartTokenizer _a = MBartTokenizerFast _a = True _a = True def snake_case ( self : Tuple )-> Union[str, Any]: super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase__ : Union[str, Any] =MBartTokenizer(lowerCamelCase, keep_accents=lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case ( self : Dict )-> Union[str, Any]: lowerCamelCase__ : Any =MBartTokenizer(lowerCamelCase, keep_accents=lowerCamelCase ) lowerCamelCase__ : List[Any] =tokenizer.tokenize('''This is a test''' ) self.assertListEqual(lowerCamelCase, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCamelCase ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], ) lowerCamelCase__ : str =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowerCamelCase, [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', 
'''al''', '''s''', '''é''', '''.''', ], ) lowerCamelCase__ : Union[str, Any] =tokenizer.convert_tokens_to_ids(lowerCamelCase ) self.assertListEqual( lowerCamelCase, [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ], ) lowerCamelCase__ : str =tokenizer.convert_ids_to_tokens(lowerCamelCase ) self.assertListEqual( lowerCamelCase, [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ], ) def snake_case ( self : Tuple )-> List[Any]: if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return lowerCamelCase__ : int =(self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowerCamelCase__ : Union[str, Any] =self.rust_tokenizer_class.from_pretrained(lowerCamelCase, **lowerCamelCase ) lowerCamelCase__ : str =self.tokenizer_class.from_pretrained(lowerCamelCase, **lowerCamelCase ) lowerCamelCase__ : List[str] =tempfile.mkdtemp() lowerCamelCase__ : Union[str, Any] =tokenizer_r.save_pretrained(lowerCamelCase ) lowerCamelCase__ : Optional[int] =tokenizer_p.save_pretrained(lowerCamelCase ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) lowerCamelCase__ : List[str] =tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(lowerCamelCase, lowerCamelCase ) # Checks 
everything loads correctly in the same way lowerCamelCase__ : Any =tokenizer_r.from_pretrained(lowerCamelCase ) lowerCamelCase__ : Dict =tokenizer_p.from_pretrained(lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(lowerCamelCase ) # Save tokenizer rust, legacy_format=True lowerCamelCase__ : Dict =tempfile.mkdtemp() lowerCamelCase__ : List[str] =tokenizer_r.save_pretrained(lowerCamelCase, legacy_format=lowerCamelCase ) lowerCamelCase__ : Tuple =tokenizer_p.save_pretrained(lowerCamelCase ) # Checks it save with the same files self.assertSequenceEqual(lowerCamelCase, lowerCamelCase ) # Checks everything loads correctly in the same way lowerCamelCase__ : Optional[int] =tokenizer_r.from_pretrained(lowerCamelCase ) lowerCamelCase__ : Any =tokenizer_p.from_pretrained(lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) ) shutil.rmtree(lowerCamelCase ) # Save tokenizer rust, legacy_format=False lowerCamelCase__ : Optional[int] =tempfile.mkdtemp() lowerCamelCase__ : int =tokenizer_r.save_pretrained(lowerCamelCase, legacy_format=lowerCamelCase ) lowerCamelCase__ : Dict =tokenizer_p.save_pretrained(lowerCamelCase ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way lowerCamelCase__ : Dict =tokenizer_r.from_pretrained(lowerCamelCase ) lowerCamelCase__ : int =tokenizer_p.from_pretrained(lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: 
self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) ) shutil.rmtree(lowerCamelCase ) @require_torch @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' _a = 'facebook/mbart-large-en-ro' _a = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.', ] _a = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei' ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] _a = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE] @classmethod def snake_case ( cls : List[Any] )-> Optional[int]: lowerCamelCase__ : MBartTokenizer =MBartTokenizer.from_pretrained( cls.checkpoint_name, src_lang='''en_XX''', tgt_lang='''ro_RO''' ) lowerCamelCase__ : Optional[int] =1 return cls def snake_case ( self : Optional[Any] )-> List[str]: self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''], 25_0001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''], 25_0004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''], 25_0020 ) def snake_case ( self : Optional[int] )-> List[Any]: lowerCamelCase__ : Union[str, Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens, lowerCamelCase ) def snake_case ( self : Optional[Any] )-> str: self.assertIn(lowerCamelCase, self.tokenizer.all_special_ids ) lowerCamelCase__ : Optional[int] =[RO_CODE, 884, 9019, 96, 9, 916, 
8_6792, 36, 1_8743, 1_5596, 5, 2] lowerCamelCase__ : Any =self.tokenizer.decode(lowerCamelCase, skip_special_tokens=lowerCamelCase ) lowerCamelCase__ : str =self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowerCamelCase ) self.assertEqual(lowerCamelCase, lowerCamelCase ) self.assertNotIn(self.tokenizer.eos_token, lowerCamelCase ) def snake_case ( self : Tuple )-> int: lowerCamelCase__ : Optional[int] =['''this is gunna be a long sentence ''' * 20] assert isinstance(src_text[0], lowerCamelCase ) lowerCamelCase__ : Dict =10 lowerCamelCase__ : Optional[int] =self.tokenizer(lowerCamelCase, max_length=lowerCamelCase, truncation=lowerCamelCase ).input_ids[0] self.assertEqual(ids[-2], 2 ) self.assertEqual(ids[-1], lowerCamelCase ) self.assertEqual(len(lowerCamelCase ), lowerCamelCase ) def snake_case ( self : int )-> Any: self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ), [25_0026, 25_0001] ) def snake_case ( self : Tuple )-> Optional[Any]: lowerCamelCase__ : int =tempfile.mkdtemp() lowerCamelCase__ : Optional[int] =self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(lowerCamelCase ) lowerCamelCase__ : Optional[Any] =MBartTokenizer.from_pretrained(lowerCamelCase ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids, lowerCamelCase ) @require_torch def snake_case ( self : Optional[Any] )-> Tuple: lowerCamelCase__ : Optional[Any] =self.tokenizer(self.src_text, text_target=self.tgt_text, padding=lowerCamelCase, return_tensors='''pt''' ) lowerCamelCase__ : Dict =shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def snake_case ( self : Optional[Any] )-> Any: lowerCamelCase__ : str 
=self.tokenizer( self.src_text, text_target=self.tgt_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=len(self.expected_src_tokens ), return_tensors='''pt''', ) lowerCamelCase__ : List[Any] =shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id ) self.assertIsInstance(lowerCamelCase, lowerCamelCase ) self.assertEqual((2, 14), batch.input_ids.shape ) self.assertEqual((2, 14), batch.attention_mask.shape ) lowerCamelCase__ : Any =batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens, lowerCamelCase ) self.assertEqual(2, batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens, [] ) self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE] ) def snake_case ( self : List[Any] )-> Dict: lowerCamelCase__ : Any =self.tokenizer(self.src_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=3, return_tensors='''pt''' ) lowerCamelCase__ : Tuple =self.tokenizer( text_target=self.tgt_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=10, return_tensors='''pt''' ) lowerCamelCase__ : Union[str, Any] =targets['''input_ids'''] lowerCamelCase__ : List[Any] =shift_tokens_right(lowerCamelCase, self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1], 3 ) self.assertEqual(batch.decoder_input_ids.shape[1], 10 ) @require_torch def snake_case ( self : Optional[int] )-> List[Any]: lowerCamelCase__ : str =self.tokenizer._build_translation_inputs( '''A test''', return_tensors='''pt''', src_lang='''en_XX''', tgt_lang='''ar_AR''' ) self.assertEqual( nested_simplify(lowerCamelCase ), { # A, test, EOS, en_XX '''input_ids''': [[62, 3034, 2, 25_0004]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 25_0001, }, )
625
1
"""simple docstring""" import inspect import unittest from transformers import DPTConfig from transformers.file_utils import is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : int, lowerCamelCase : List[str], lowerCamelCase : Any=2, lowerCamelCase : Optional[int]=32, lowerCamelCase : str=16, lowerCamelCase : int=3, lowerCamelCase : List[str]=True, lowerCamelCase : List[str]=True, lowerCamelCase : str=32, lowerCamelCase : int=4, lowerCamelCase : Optional[int]=[0, 1, 2, 3], lowerCamelCase : str=4, lowerCamelCase : Optional[int]=37, lowerCamelCase : str="gelu", lowerCamelCase : Tuple=0.1, lowerCamelCase : Dict=0.1, lowerCamelCase : Union[str, Any]=0.02, lowerCamelCase : str=3, lowerCamelCase : Dict=[1, 384, 24, 24], lowerCamelCase : List[str]=True, lowerCamelCase : Union[str, Any]=None, )-> Tuple: lowerCamelCase__ : Optional[Any] =parent lowerCamelCase__ : Optional[Any] =batch_size lowerCamelCase__ : Tuple =image_size lowerCamelCase__ : Optional[int] =patch_size lowerCamelCase__ : Union[str, Any] =num_channels lowerCamelCase__ : Any =is_training lowerCamelCase__ : int =use_labels lowerCamelCase__ : Dict =hidden_size lowerCamelCase__ : Optional[Any] =num_hidden_layers lowerCamelCase__ : Optional[Any] =backbone_out_indices lowerCamelCase__ : 
Optional[int] =num_attention_heads lowerCamelCase__ : int =intermediate_size lowerCamelCase__ : int =hidden_act lowerCamelCase__ : str =hidden_dropout_prob lowerCamelCase__ : Optional[Any] =attention_probs_dropout_prob lowerCamelCase__ : Union[str, Any] =initializer_range lowerCamelCase__ : str =num_labels lowerCamelCase__ : List[str] =backbone_featmap_shape lowerCamelCase__ : Dict =scope lowerCamelCase__ : Optional[Any] =is_hybrid # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token) lowerCamelCase__ : Optional[Any] =(image_size // patch_size) ** 2 lowerCamelCase__ : List[str] =num_patches + 1 def snake_case ( self : int )-> Optional[int]: lowerCamelCase__ : Any =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ : Optional[Any] =None if self.use_labels: lowerCamelCase__ : Union[str, Any] =ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels ) lowerCamelCase__ : List[str] =self.get_config() return config, pixel_values, labels def snake_case ( self : int )-> Tuple: lowerCamelCase__ : List[Any] ={ '''global_padding''': '''same''', '''layer_type''': '''bottleneck''', '''depths''': [3, 4, 9], '''out_features''': ['''stage1''', '''stage2''', '''stage3'''], '''embedding_dynamic_padding''': True, '''hidden_sizes''': [96, 192, 384, 768], '''num_groups''': 2, } return DPTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, backbone_out_indices=self.backbone_out_indices, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=lowerCamelCase, initializer_range=self.initializer_range, is_hybrid=self.is_hybrid, backbone_config=lowerCamelCase, 
backbone_featmap_shape=self.backbone_featmap_shape, ) def snake_case ( self : Optional[int], lowerCamelCase : List[Any], lowerCamelCase : List[str], lowerCamelCase : Union[str, Any] )-> Optional[int]: lowerCamelCase__ : Any =DPTModel(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() lowerCamelCase__ : List[Any] =model(lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case ( self : Any, lowerCamelCase : Any, lowerCamelCase : str, lowerCamelCase : Dict )-> int: lowerCamelCase__ : List[Any] =self.num_labels lowerCamelCase__ : int =DPTForDepthEstimation(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() lowerCamelCase__ : List[str] =model(lowerCamelCase ) self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size) ) def snake_case ( self : str, lowerCamelCase : Any, lowerCamelCase : List[Any], lowerCamelCase : str )-> int: lowerCamelCase__ : str =self.num_labels lowerCamelCase__ : Tuple =DPTForSemanticSegmentation(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() lowerCamelCase__ : Tuple =model(lowerCamelCase, labels=lowerCamelCase ) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def snake_case ( self : List[Any] )-> str: lowerCamelCase__ : List[Any] =self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] =config_and_inputs lowerCamelCase__ : Union[str, Any] ={'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' _a = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else () _a = ( { 'depth-estimation': DPTForDepthEstimation, 'feature-extraction': DPTModel, 'image-segmentation': DPTForSemanticSegmentation, } if is_torch_available() 
else {} ) _a = False _a = False _a = False def snake_case ( self : str )-> Tuple: lowerCamelCase__ : Any =DPTModelTester(self ) lowerCamelCase__ : Optional[Any] =ConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase, hidden_size=37 ) def snake_case ( self : Union[str, Any] )-> List[Any]: self.config_tester.run_common_tests() @unittest.skip(reason='''DPT does not use inputs_embeds''' ) def snake_case ( self : Optional[Any] )-> str: pass def snake_case ( self : int )-> Union[str, Any]: lowerCamelCase__ , lowerCamelCase__ : str =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Union[str, Any] =model_class(lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings(), (nn.Module) ) lowerCamelCase__ : Optional[Any] =model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase, nn.Linear ) ) def snake_case ( self : str )-> List[str]: lowerCamelCase__ , lowerCamelCase__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Any =model_class(lowerCamelCase ) lowerCamelCase__ : int =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ : Any =[*signature.parameters.keys()] lowerCamelCase__ : Optional[Any] =['''pixel_values'''] self.assertListEqual(arg_names[:1], lowerCamelCase ) def snake_case ( self : List[str] )-> Optional[Any]: lowerCamelCase__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase ) def snake_case ( self : Optional[Any] )-> List[Any]: lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*lowerCamelCase ) def snake_case ( self : Any )-> Optional[Any]: lowerCamelCase__ : Tuple =self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase ) def snake_case ( self : int )-> Any: for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ : Dict =True if model_class in get_values(lowerCamelCase ): continue lowerCamelCase__ : List[Any] =model_class(lowerCamelCase ) model.to(lowerCamelCase ) model.train() lowerCamelCase__ : Union[str, Any] =self._prepare_for_class(lowerCamelCase, lowerCamelCase, return_labels=lowerCamelCase ) lowerCamelCase__ : List[str] =model(**lowerCamelCase ).loss loss.backward() def snake_case ( self : Any )-> Optional[Any]: for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue lowerCamelCase__ , lowerCamelCase__ : int =self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ : Optional[int] =False lowerCamelCase__ : Optional[int] =True if model_class in get_values(lowerCamelCase ) or not model_class.supports_gradient_checkpointing: continue lowerCamelCase__ : Any =model_class(lowerCamelCase ) model.to(lowerCamelCase ) model.gradient_checkpointing_enable() model.train() lowerCamelCase__ : Tuple =self._prepare_for_class(lowerCamelCase, lowerCamelCase, return_labels=lowerCamelCase ) lowerCamelCase__ : Tuple =model(**lowerCamelCase ).loss loss.backward() def snake_case ( self : Any )-> Optional[Any]: lowerCamelCase__ , lowerCamelCase__ : int =self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ : Optional[int] =_config_zero_init(lowerCamelCase ) for model_class in self.all_model_classes: lowerCamelCase__ : List[str] =model_class(config=lowerCamelCase ) # Skip the check for the backbone lowerCamelCase__ : List[Any] =[] for name, module in model.named_modules(): if module.__class__.__name__ == "DPTViTHybridEmbeddings": lowerCamelCase__ : Optional[Any] 
=[F'''{name}.{key}''' for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def snake_case ( self : str )-> Optional[Any]: pass @slow def snake_case ( self : Tuple )-> Any: for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]: lowerCamelCase__ : Optional[int] =DPTModel.from_pretrained(lowerCamelCase ) self.assertIsNotNone(lowerCamelCase ) def snake_case ( self : Any )-> List[str]: # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type lowerCamelCase__ , lowerCamelCase__ : int =self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ : Optional[int] ='''add''' with self.assertRaises(lowerCamelCase ): lowerCamelCase__ : Optional[int] =DPTForDepthEstimation(lowerCamelCase ) def snake_case__ ( ): """simple docstring""" lowerCamelCase__ : str =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision @slow class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def snake_case ( self : Optional[Any] )-> Optional[int]: lowerCamelCase__ : Any =DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' ) lowerCamelCase__ : List[str] =DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(lowerCamelCase ) lowerCamelCase__ : Any =prepare_img() lowerCamelCase__ : Optional[Any] =image_processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase ) # forward pass with torch.no_grad(): lowerCamelCase__ : List[str] =model(**lowerCamelCase ) lowerCamelCase__ : Dict =outputs.predicted_depth # verify the predicted depth lowerCamelCase__ : Tuple 
=torch.Size((1, 384, 384) ) self.assertEqual(predicted_depth.shape, lowerCamelCase ) lowerCamelCase__ : Optional[int] =torch.tensor( [[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]] ).to(lowerCamelCase ) self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, lowerCamelCase, atol=1E-4 ) )
625
"""simple docstring""" def snake_case__ ( __lowerCamelCase : str ): """simple docstring""" return " ".join( ''''''.join(word[::-1] ) if len(__lowerCamelCase ) > 4 else word for word in sentence.split() ) if __name__ == "__main__": import doctest doctest.testmod() print(reverse_long_words("Hey wollef sroirraw"))
625
1
"""simple docstring""" from __future__ import annotations from decimal import Decimal from math import * # noqa: F403 from sympy import diff def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : float | Decimal , __lowerCamelCase : float = 10**-10 ): """simple docstring""" lowerCamelCase__ : Optional[int] =a while True: lowerCamelCase__ : List[Any] =Decimal(__lowerCamelCase ) - ( Decimal(eval(__lowerCamelCase ) ) / Decimal(eval(str(diff(__lowerCamelCase ) ) ) ) # noqa: S307 ) # This number dictates the accuracy of the answer if abs(eval(__lowerCamelCase ) ) < precision: # noqa: S307 return float(__lowerCamelCase ) # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}') # Find root of polynomial print(f'The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}') # Find Square Root of 5 print(f'The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}') # Exponential Roots print(f'The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}')
625
"""simple docstring""" def snake_case__ ( __lowerCamelCase : int = 10 , __lowerCamelCase : int = 22 ): """simple docstring""" lowerCamelCase__ : Optional[Any] =range(1 , __lowerCamelCase ) lowerCamelCase__ : str =range(1 , __lowerCamelCase ) return sum( 1 for power in powers for base in bases if len(str(base**power ) ) == power ) if __name__ == "__main__": print(f'{solution(1_0, 2_2) = }')
625
1
"""simple docstring""" from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. _lowercase : Dict = 2_0_0 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. _lowercase : Optional[Any] = 5_0 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. _lowercase : Dict = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1_0_0_0)) def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : str ): """simple docstring""" lowerCamelCase__ : Optional[int] =len([g for position, g in enumerate(__lowerCamelCase ) if g == main_target[position]] ) return (item, float(__lowerCamelCase )) def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : str ): """simple docstring""" lowerCamelCase__ : Tuple =random.randint(0 , len(__lowerCamelCase ) - 1 ) lowerCamelCase__ : Union[str, Any] =parent_a[:random_slice] + parent_a[random_slice:] lowerCamelCase__ : List[Any] =parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : list[str] ): """simple docstring""" lowerCamelCase__ : Any =list(__lowerCamelCase ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: lowerCamelCase__ : Optional[int] =random.choice(__lowerCamelCase ) return "".join(__lowerCamelCase ) def snake_case__ ( __lowerCamelCase : tuple[str, float] , __lowerCamelCase : list[tuple[str, float]] , __lowerCamelCase : list[str] , ): """simple docstring""" lowerCamelCase__ : Dict =[] # Generate more children proportionally to the fitness score. 
lowerCamelCase__ : Any =int(parent_a[1] * 100 ) + 1 lowerCamelCase__ : Tuple =10 if child_n >= 10 else child_n for _ in range(__lowerCamelCase ): lowerCamelCase__ : List[str] =population_score[random.randint(0 , __lowerCamelCase )][0] lowerCamelCase__ , lowerCamelCase__ : Dict =crossover(parent_a[0] , __lowerCamelCase ) # Append new string to the population list. pop.append(mutate(__lowerCamelCase , __lowerCamelCase ) ) pop.append(mutate(__lowerCamelCase , __lowerCamelCase ) ) return pop def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : list[str] , __lowerCamelCase : bool = True ): """simple docstring""" # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: lowerCamelCase__ : List[Any] =f'''{N_POPULATION} must be bigger than {N_SELECTED}''' raise ValueError(__lowerCamelCase ) # Verify that the target contains no genes besides the ones inside genes variable. lowerCamelCase__ : Dict =sorted({c for c in target if c not in genes} ) if not_in_genes_list: lowerCamelCase__ : Tuple =f'''{not_in_genes_list} is not in genes list, evolution cannot converge''' raise ValueError(__lowerCamelCase ) # Generate random starting population. lowerCamelCase__ : List[str] =[] for _ in range(__lowerCamelCase ): population.append(''''''.join([random.choice(__lowerCamelCase ) for i in range(len(__lowerCamelCase ) )] ) ) # Just some logs to know what the algorithms is doing. lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(__lowerCamelCase ) # Random population created. Now it's time to evaluate. 
# Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. lowerCamelCase__ : Tuple =[evaluate(__lowerCamelCase , __lowerCamelCase ) for item in population] # Check if there is a matching evolution. lowerCamelCase__ : Optional[int] =sorted(__lowerCamelCase , key=lambda __lowerCamelCase : x[1] , reverse=__lowerCamelCase ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( f'''\nGeneration: {generation}''' f'''\nTotal Population:{total_population}''' f'''\nBest score: {population_score[0][1]}''' f'''\nBest string: {population_score[0][0]}''' ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. lowerCamelCase__ : List[str] =population[: int(N_POPULATION / 3 )] population.clear() population.extend(__lowerCamelCase ) # Normalize population score to be between 0 and 1. lowerCamelCase__ : List[str] =[ (item, score / len(__lowerCamelCase )) for item, score in population_score ] # This is selection for i in range(__lowerCamelCase ): population.extend(select(population_score[int(__lowerCamelCase )] , __lowerCamelCase , __lowerCamelCase ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. 
If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(__lowerCamelCase ) > N_POPULATION: break if __name__ == "__main__": _lowercase : Any = ( "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!" ) _lowercase : Any = list( " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm" "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\" ) _lowercase , _lowercase , _lowercase : List[str] = basic(target_str, genes_list) print( f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}' )
625
"""simple docstring""" import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def snake_case__ ( __lowerCamelCase : List[Any] ): """simple docstring""" if isinstance(__lowerCamelCase , collections.abc.Iterable ): return x return (x, x) @require_flax class __SCREAMING_SNAKE_CASE : '''simple docstring''' def snake_case ( self : Dict, lowerCamelCase : List[str], lowerCamelCase : Any )-> Union[str, Any]: pass def snake_case ( self : List[str] )-> List[str]: pass def snake_case ( self : Optional[Any] )-> str: pass def snake_case ( self : Union[str, Any], lowerCamelCase : np.ndarray, lowerCamelCase : np.ndarray, lowerCamelCase : float )-> Dict: lowerCamelCase__ : Union[str, Any] =np.abs((a - b) ).max() self.assertLessEqual(lowerCamelCase, lowerCamelCase, F'''Difference between torch and flax is {diff} (>= {tol}).''' ) def snake_case ( self : Dict, lowerCamelCase : Tuple, lowerCamelCase : Any, lowerCamelCase : List[str], lowerCamelCase : Dict, lowerCamelCase : Any=None, **lowerCamelCase : str )-> int: 
lowerCamelCase__ : List[str] =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : Tuple =FlaxVisionTextDualEncoderModel(lowerCamelCase ) lowerCamelCase__ : Dict =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase ) self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], config.projection_dim) ) def snake_case ( self : Any, lowerCamelCase : int, lowerCamelCase : Optional[Any], lowerCamelCase : List[str], lowerCamelCase : Union[str, Any], lowerCamelCase : str=None, **lowerCamelCase : List[Any] )-> int: lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : Union[str, Any] ={'''vision_model''': vision_model, '''text_model''': text_model} lowerCamelCase__ : Tuple =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase ) lowerCamelCase__ : Optional[int] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase ) self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], model.config.projection_dim) ) def snake_case ( self : Any, lowerCamelCase : Dict, lowerCamelCase : Dict, lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Dict=None, **lowerCamelCase : int )-> List[str]: lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : Optional[int] ={'''vision_model''': vision_model, '''text_model''': text_model} lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase ) lowerCamelCase__ : List[Any] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, 
attention_mask=lowerCamelCase ) lowerCamelCase__ : int =output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCamelCase ) lowerCamelCase__ : Dict =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase ) lowerCamelCase__ : Optional[int] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase ) lowerCamelCase__ : List[str] =after_output[0] lowerCamelCase__ : Any =np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCamelCase, 1E-3 ) def snake_case ( self : Optional[Any], lowerCamelCase : Dict, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : List[Any]=None, **lowerCamelCase : List[Any] )-> Tuple: lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : Any ={'''vision_model''': vision_model, '''text_model''': text_model} lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase ) lowerCamelCase__ : List[str] =model( input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase, output_attentions=lowerCamelCase ) lowerCamelCase__ : int =output.vision_model_output.attentions self.assertEqual(len(lowerCamelCase ), vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) lowerCamelCase__ : Tuple =to_atuple(vision_model.config.image_size ) lowerCamelCase__ : Optional[Any] =to_atuple(vision_model.config.patch_size ) lowerCamelCase__ : Union[str, Any] =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) lowerCamelCase__ : int =num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len) ) lowerCamelCase__ : List[Any] =output.text_model_output.attentions self.assertEqual(len(lowerCamelCase ), text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:], 
(text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), ) def snake_case ( self : Tuple, lowerCamelCase : Optional[int], lowerCamelCase : Any, lowerCamelCase : Union[str, Any] )-> Any: pt_model.to(lowerCamelCase ) pt_model.eval() # prepare inputs lowerCamelCase__ : Any =inputs_dict lowerCamelCase__ : Any ={k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): lowerCamelCase__ : List[str] =pt_model(**lowerCamelCase ).to_tuple() lowerCamelCase__ : Optional[Any] =fx_model(**lowerCamelCase ).to_tuple() self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4] ): self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowerCamelCase ) lowerCamelCase__ : Optional[int] =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_pt=lowerCamelCase ) lowerCamelCase__ : List[Any] =fx_model_loaded(**lowerCamelCase ).to_tuple() self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4] ): self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowerCamelCase ) lowerCamelCase__ : str =VisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_flax=lowerCamelCase ) pt_model_loaded.to(lowerCamelCase ) pt_model_loaded.eval() with torch.no_grad(): lowerCamelCase__ : List[Any] =pt_model_loaded(**lowerCamelCase ).to_tuple() self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4] ): self.assert_almost_equals(lowerCamelCase, 
pt_output_loaded.numpy(), 4E-2 ) def snake_case ( self : str, lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[Any], lowerCamelCase : str )-> List[Any]: lowerCamelCase__ : Any =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : List[Any] =VisionTextDualEncoderModel(lowerCamelCase ) lowerCamelCase__ : List[str] =FlaxVisionTextDualEncoderModel(lowerCamelCase ) lowerCamelCase__ : str =convert_pytorch_state_dict_to_flax(pt_model.state_dict(), lowerCamelCase ) lowerCamelCase__ : Tuple =fx_state self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase ) def snake_case ( self : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Union[str, Any] )-> Optional[int]: lowerCamelCase__ : Dict =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : Tuple =VisionTextDualEncoderModel(lowerCamelCase ) lowerCamelCase__ : List[str] =FlaxVisionTextDualEncoderModel(lowerCamelCase ) lowerCamelCase__ : Tuple =load_flax_weights_in_pytorch_model(lowerCamelCase, fx_model.params ) self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase ) def snake_case ( self : Optional[int] )-> Union[str, Any]: lowerCamelCase__ : Any =self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**lowerCamelCase ) def snake_case ( self : Tuple )-> int: lowerCamelCase__ : int =self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**lowerCamelCase ) def snake_case ( self : Tuple )-> Any: lowerCamelCase__ : Tuple =self.prepare_config_and_inputs() self.check_save_load(**lowerCamelCase ) def snake_case ( self : str )-> Any: lowerCamelCase__ : str =self.prepare_config_and_inputs() self.check_vision_text_output_attention(**lowerCamelCase ) @is_pt_flax_cross_test def snake_case ( self : Tuple )-> List[Any]: lowerCamelCase__ : Union[str, Any] =self.prepare_config_and_inputs() 
lowerCamelCase__ : Union[str, Any] =config_inputs_dict.pop('''vision_config''' ) lowerCamelCase__ : Optional[Any] =config_inputs_dict.pop('''text_config''' ) lowerCamelCase__ : Tuple =config_inputs_dict self.check_equivalence_pt_to_flax(lowerCamelCase, lowerCamelCase, lowerCamelCase ) self.check_equivalence_flax_to_pt(lowerCamelCase, lowerCamelCase, lowerCamelCase ) @slow def snake_case ( self : Optional[Any] )-> Tuple: lowerCamelCase__ , lowerCamelCase__ : Dict =self.get_pretrained_model_and_inputs() lowerCamelCase__ : Optional[int] =model_a(**lowerCamelCase ) lowerCamelCase__ : List[str] =outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(lowerCamelCase ) lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase ) lowerCamelCase__ : Union[str, Any] =model_a(**lowerCamelCase ) lowerCamelCase__ : List[Any] =after_outputs[0] lowerCamelCase__ : Any =np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCamelCase, 1E-5 ) @require_flax class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' def snake_case ( self : Optional[int] )-> Optional[Any]: lowerCamelCase__ : str =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( '''hf-internal-testing/tiny-random-vit''', '''hf-internal-testing/tiny-bert''', vision_from_pt=lowerCamelCase, text_from_pt=lowerCamelCase, ) lowerCamelCase__ : Union[str, Any] =13 lowerCamelCase__ : List[str] =floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) lowerCamelCase__ : List[str] =ids_tensor([batch_size, 4], model.config.text_config.vocab_size ) lowerCamelCase__ : Optional[int] =random_attention_mask([batch_size, 4] ) lowerCamelCase__ : Any ={'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def snake_case ( self : str, lowerCamelCase : str, lowerCamelCase : int 
)-> int: lowerCamelCase__ : str =FlaxViTModel(lowerCamelCase ) lowerCamelCase__ : Any =FlaxBertModel(lowerCamelCase ) return vision_model, text_model def snake_case ( self : int )-> Optional[int]: lowerCamelCase__ : Any =FlaxViTModelTester(self ) lowerCamelCase__ : Union[str, Any] =FlaxBertModelTester(self ) lowerCamelCase__ : Any =vit_model_tester.prepare_config_and_inputs() lowerCamelCase__ : Optional[Any] =bert_model_tester.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ : Any =vision_config_and_inputs lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple =text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' def snake_case ( self : Optional[int] )-> Optional[int]: lowerCamelCase__ : Union[str, Any] =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( '''hf-internal-testing/tiny-random-clip''', '''hf-internal-testing/tiny-bert''', vision_from_pt=lowerCamelCase, text_from_pt=lowerCamelCase, ) lowerCamelCase__ : Union[str, Any] =13 lowerCamelCase__ : Optional[Any] =floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) lowerCamelCase__ : Union[str, Any] =ids_tensor([batch_size, 4], model.config.text_config.vocab_size ) lowerCamelCase__ : str =random_attention_mask([batch_size, 4] ) lowerCamelCase__ : Optional[int] ={'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def snake_case ( self : List[str], lowerCamelCase : Any, lowerCamelCase : Dict )-> Dict: lowerCamelCase__ : str =FlaxCLIPVisionModel(lowerCamelCase ) lowerCamelCase__ : 
Optional[Any] =FlaxBertModel(lowerCamelCase ) return vision_model, text_model def snake_case ( self : Optional[int] )-> Optional[Any]: lowerCamelCase__ : List[Any] =FlaxCLIPVisionModelTester(self ) lowerCamelCase__ : List[Any] =FlaxBertModelTester(self ) lowerCamelCase__ : Any =clip_model_tester.prepare_config_and_inputs() lowerCamelCase__ : Optional[int] =bert_model_tester.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ : List[Any] =vision_config_and_inputs lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] =text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @slow def snake_case ( self : Tuple )-> Optional[Any]: lowerCamelCase__ : Any =FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''', logit_scale_init_value=1.0 ) lowerCamelCase__ : List[Any] =VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' ) lowerCamelCase__ : Optional[int] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCamelCase__ : Dict =processor( text=['''una foto di un gatto''', '''una foto di un cane'''], images=lowerCamelCase, padding=lowerCamelCase, return_tensors='''np''' ) lowerCamelCase__ : List[Any] =model(**lowerCamelCase ) # verify the logits self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), ) lowerCamelCase__ : Any =np.array([[1.2_284_727, 0.3_104_122]] ) self.assertTrue(np.allclose(outputs.logits_per_image, lowerCamelCase, atol=1E-3 ) )
625
1
"""simple docstring""" from math import factorial _lowercase : Any = {str(d): factorial(d) for d in range(1_0)} def snake_case__ ( __lowerCamelCase : int ): """simple docstring""" return sum(DIGIT_FACTORIAL[d] for d in str(__lowerCamelCase ) ) def snake_case__ ( ): """simple docstring""" lowerCamelCase__ : List[Any] =7 * factorial(9 ) + 1 return sum(i for i in range(3 , __lowerCamelCase ) if sum_of_digit_factorial(__lowerCamelCase ) == i ) if __name__ == "__main__": print(f'{solution() = }')
625
"""simple docstring""" def snake_case__ ( __lowerCamelCase : list , __lowerCamelCase : list , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int ): """simple docstring""" if index == number_of_items: return 0 lowerCamelCase__ : Optional[int] =0 lowerCamelCase__ : Union[str, Any] =0 lowerCamelCase__ : List[str] =knapsack(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , index + 1 ) if weights[index] <= max_weight: lowerCamelCase__ : Dict =values[index] + knapsack( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , max_weight - weights[index] , index + 1 ) return max(__lowerCamelCase , __lowerCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
625
1
"""simple docstring""" import os def snake_case__ ( __lowerCamelCase : str = "matrix.txt" ): """simple docstring""" with open(os.path.join(os.path.dirname(__lowerCamelCase ) , __lowerCamelCase ) ) as in_file: lowerCamelCase__ : str =in_file.read() lowerCamelCase__ : List[str] =[[int(__lowerCamelCase ) for cell in row.split(''',''' )] for row in data.strip().splitlines()] lowerCamelCase__ : Optional[Any] =[[0 for cell in row] for row in grid] lowerCamelCase__ : Union[str, Any] =len(grid[0] ) lowerCamelCase__ : Union[str, Any] =[[0 for i in range(__lowerCamelCase )] for j in range(__lowerCamelCase )] lowerCamelCase__ : Optional[Any] =grid[0][0] for i in range(1 , __lowerCamelCase ): lowerCamelCase__ : Dict =grid[0][i] + dp[0][i - 1] for i in range(1 , __lowerCamelCase ): lowerCamelCase__ : Dict =grid[i][0] + dp[i - 1][0] for i in range(1 , __lowerCamelCase ): for j in range(1 , __lowerCamelCase ): lowerCamelCase__ : Optional[Any] =grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] ) return dp[-1][-1] if __name__ == "__main__": print(f'{solution() = }')
625
"""simple docstring""" _lowercase : Optional[Any] = { "Pillow": "Pillow<10.0.0", "accelerate": "accelerate>=0.20.3", "av": "av==9.2.0", "beautifulsoup4": "beautifulsoup4", "black": "black~=23.1", "codecarbon": "codecarbon==1.2.0", "cookiecutter": "cookiecutter==1.7.3", "dataclasses": "dataclasses", "datasets": "datasets!=2.5.0", "decord": "decord==0.6.0", "deepspeed": "deepspeed>=0.9.3", "diffusers": "diffusers", "dill": "dill<0.3.5", "evaluate": "evaluate>=0.2.0", "fairscale": "fairscale>0.3", "faiss-cpu": "faiss-cpu", "fastapi": "fastapi", "filelock": "filelock", "flax": "flax>=0.4.1,<=0.7.0", "ftfy": "ftfy", "fugashi": "fugashi>=1.0", "GitPython": "GitPython<3.1.19", "hf-doc-builder": "hf-doc-builder>=0.3.0", "huggingface-hub": "huggingface-hub>=0.14.1,<1.0", "importlib_metadata": "importlib_metadata", "ipadic": "ipadic>=1.0.0,<2.0", "isort": "isort>=5.5.4", "jax": "jax>=0.2.8,!=0.3.2,<=0.4.13", "jaxlib": "jaxlib>=0.1.65,<=0.4.13", "jieba": "jieba", "kenlm": "kenlm", "keras-nlp": "keras-nlp>=0.3.1", "librosa": "librosa", "nltk": "nltk", "natten": "natten>=0.14.6", "numpy": "numpy>=1.17", "onnxconverter-common": "onnxconverter-common", "onnxruntime-tools": "onnxruntime-tools>=1.4.2", "onnxruntime": "onnxruntime>=1.4.0", "opencv-python": "opencv-python", "optuna": "optuna", "optax": "optax>=0.0.8,<=0.1.4", "packaging": "packaging>=20.0", "parameterized": "parameterized", "phonemizer": "phonemizer", "protobuf": "protobuf", "psutil": "psutil", "pyyaml": "pyyaml>=5.1", "pydantic": "pydantic<2", "pytest": "pytest>=7.2.0", "pytest-timeout": "pytest-timeout", "pytest-xdist": "pytest-xdist", "python": "python>=3.8.0", "ray[tune]": "ray[tune]", "regex": "regex!=2019.12.17", "requests": "requests", "rhoknp": "rhoknp>=1.1.0,<1.3.1", "rjieba": "rjieba", "rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1", "ruff": "ruff>=0.0.241,<=0.0.259", "sacrebleu": "sacrebleu>=1.4.12,<2.0.0", "sacremoses": "sacremoses", "safetensors": "safetensors>=0.3.1", "sagemaker": 
"sagemaker>=2.31.0", "scikit-learn": "scikit-learn", "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92", "sigopt": "sigopt", "starlette": "starlette", "sudachipy": "sudachipy>=0.6.6", "sudachidict_core": "sudachidict_core>=20220729", "tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14", "tensorflow": "tensorflow>=2.6,<2.14", "tensorflow-text": "tensorflow-text<2.14", "tf2onnx": "tf2onnx", "timeout-decorator": "timeout-decorator", "timm": "timm", "tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14", "torch": "torch>=1.9,!=1.12.0", "torchaudio": "torchaudio", "torchvision": "torchvision", "pyctcdecode": "pyctcdecode>=0.4.0", "tqdm": "tqdm>=4.27", "unidic": "unidic>=1.0.2", "unidic_lite": "unidic_lite>=1.0.7", "urllib3": "urllib3<2.0.0", "uvicorn": "uvicorn", }
625
1
"""simple docstring""" import collections import json import math import os import re import time from fnmatch import fnmatch from typing import Dict import requests from slack_sdk import WebClient _lowercase : Tuple = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"]) def snake_case__ ( __lowerCamelCase : Optional[int] ): """simple docstring""" lowerCamelCase__ : int =test_results.split(''' ''' ) lowerCamelCase__ : str =0 lowerCamelCase__ : List[Any] =0 # When the output is short enough, the output is surrounded by = signs: "== OUTPUT ==" # When it is too long, those signs are not present. lowerCamelCase__ : List[Any] =expressions[-2] if '''=''' in expressions[-1] else expressions[-1] for i, expression in enumerate(__lowerCamelCase ): if "failed" in expression: failed += int(expressions[i - 1] ) if "passed" in expression: success += int(expressions[i - 1] ) return failed, success, time_spent def snake_case__ ( __lowerCamelCase : str ): """simple docstring""" lowerCamelCase__ : int ={} lowerCamelCase__ : Union[str, Any] =None lowerCamelCase__ : List[str] =False for line in failures_short_lines.split('''\n''' ): if re.search(R'''_ \[doctest\]''' , __lowerCamelCase ): lowerCamelCase__ : int =True lowerCamelCase__ : Optional[int] =line.split(''' ''' )[2] elif in_error and not line.split(''' ''' )[0].isdigit(): lowerCamelCase__ : Dict =line lowerCamelCase__ : List[Any] =False return failures class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : int, lowerCamelCase : str, lowerCamelCase : Dict )-> Optional[Any]: lowerCamelCase__ : Union[str, Any] =title lowerCamelCase__ : int =doc_test_results['''time_spent'''].split(''',''' )[0] lowerCamelCase__ : Dict =doc_test_results['''success'''] lowerCamelCase__ : List[Any] =doc_test_results['''failures'''] lowerCamelCase__ : List[str] =self.n_success + self.n_failures # Failures and success of the modeling tests lowerCamelCase__ : List[Any] =doc_test_results @property def snake_case ( self : Optional[Any] )-> 
str: lowerCamelCase__ : Optional[int] =[self._time_spent] lowerCamelCase__ : Optional[Any] =0 for time in time_spent: lowerCamelCase__ : Union[str, Any] =time.split(''':''' ) # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute. if len(lowerCamelCase ) == 1: lowerCamelCase__ : Any =[0, 0, time_parts[0]] lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str =int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] ) total_secs += hours * 3600 + minutes * 60 + seconds lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] =total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60 return F'''{int(lowerCamelCase )}h{int(lowerCamelCase )}m{int(lowerCamelCase )}s''' @property def snake_case ( self : List[Any] )-> Dict: return {"type": "header", "text": {"type": "plain_text", "text": self.title}} @property def snake_case ( self : Dict )-> Dict: return { "type": "section", "text": { "type": "plain_text", "text": F'''🌞 There were no failures: all {self.n_tests} tests passed. 
The suite ran in {self.time}.''', "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''', }, } @property def snake_case ( self : List[Any] )-> Dict: return { "type": "section", "text": { "type": "plain_text", "text": ( F'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in''' F''' {self.time}.''' ), "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''', }, } @property def snake_case ( self : Any )-> Dict: lowerCamelCase__ : Union[str, Any] =40 lowerCamelCase__ : List[str] ={k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(lowerCamelCase, lowerCamelCase )} lowerCamelCase__ : Optional[Any] ='''''' for category, failures in category_failures.items(): if len(lowerCamelCase ) == 0: continue if report != "": report += "\n\n" report += F'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n" report += "`" report += "`\n`".join(lowerCamelCase ) report += "`" return { "type": "section", "text": { "type": "mrkdwn", "text": F'''The following examples had failures:\n\n\n{report}\n''', }, } @property def snake_case ( self : Any )-> str: lowerCamelCase__ : int =[self.header] if self.n_failures > 0: blocks.append(self.failures ) if self.n_failures > 0: blocks.extend([self.category_failures] ) if self.n_failures == 0: blocks.append(self.no_failures ) return json.dumps(lowerCamelCase ) @staticmethod def snake_case ( )-> int: lowerCamelCase__ : Tuple =[ { '''type''': '''section''', '''text''': { '''type''': '''plain_text''', '''text''': '''There was an issue running the tests.''', }, '''accessory''': { '''type''': '''button''', '''text''': {'''type''': 
'''plain_text''', '''text''': '''Check Action results''', '''emoji''': True}, '''url''': F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''', }, } ] print('''Sending the following payload''' ) print(json.dumps({'''blocks''': json.loads(lowerCamelCase )} ) ) client.chat_postMessage( channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''], text='''There was an issue running the tests.''', blocks=lowerCamelCase, ) def snake_case ( self : Optional[Any] )-> Dict: print('''Sending the following payload''' ) print(json.dumps({'''blocks''': json.loads(self.payload )} ) ) lowerCamelCase__ : Dict =F'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else '''All tests passed.''' lowerCamelCase__ : str =client.chat_postMessage( channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''], blocks=self.payload, text=lowerCamelCase, ) def snake_case ( self : Optional[Any], lowerCamelCase : Optional[int], lowerCamelCase : Optional[Any], lowerCamelCase : Tuple, lowerCamelCase : int )-> Dict: lowerCamelCase__ : Optional[int] ='''''' for key, value in failures.items(): lowerCamelCase__ : List[str] =value[:200] + ''' [Truncated]''' if len(lowerCamelCase ) > 250 else value failures_text += F'''*{key}*\n_{value}_\n\n''' lowerCamelCase__ : Optional[Any] =job_name lowerCamelCase__ : Optional[Any] ={'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}} if job_link is not None: lowerCamelCase__ : List[Any] ={ '''type''': '''button''', '''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True}, '''url''': job_link, } return [ {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}}, content, {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}}, ] def snake_case ( self : Tuple )-> str: if self.thread_ts is None: raise ValueError('''Can only post reply if a post has been made.''' ) lowerCamelCase__ : Dict 
=self.doc_test_results.pop('''job_link''' ) self.doc_test_results.pop('''failures''' ) self.doc_test_results.pop('''success''' ) self.doc_test_results.pop('''time_spent''' ) lowerCamelCase__ : Optional[int] =sorted(self.doc_test_results.items(), key=lambda lowerCamelCase : t[0] ) for job, job_result in sorted_dict: if len(job_result['''failures'''] ): lowerCamelCase__ : int =F'''*Num failures* :{len(job_result["failed"] )} \n''' lowerCamelCase__ : Dict =job_result['''failures'''] lowerCamelCase__ : Any =self.get_reply_blocks(lowerCamelCase, lowerCamelCase, lowerCamelCase, text=lowerCamelCase ) print('''Sending the following reply''' ) print(json.dumps({'''blocks''': blocks} ) ) client.chat_postMessage( channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''], text=F'''Results for {job}''', blocks=lowerCamelCase, thread_ts=self.thread_ts['''ts'''], ) time.sleep(1 ) def snake_case__ ( ): """simple docstring""" lowerCamelCase__ : int =os.environ['''GITHUB_RUN_ID'''] lowerCamelCase__ : str =f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100''' lowerCamelCase__ : Any =requests.get(__lowerCamelCase ).json() lowerCamelCase__ : Optional[Any] ={} try: jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} ) lowerCamelCase__ : Tuple =math.ceil((result['''total_count'''] - 100) / 100 ) for i in range(__lowerCamelCase ): lowerCamelCase__ : Tuple =requests.get(url + f'''&page={i + 2}''' ).json() jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} ) return jobs except Exception as e: print('''Unknown error, could not fetch links.''' , __lowerCamelCase ) return {} def snake_case__ ( __lowerCamelCase : str ): """simple docstring""" lowerCamelCase__ : Optional[Any] ={} if os.path.exists(__lowerCamelCase ): lowerCamelCase__ : Tuple =os.listdir(__lowerCamelCase ) for file in files: try: with open(os.path.join(__lowerCamelCase , __lowerCamelCase ) , encoding='''utf-8''' ) as f: 
lowerCamelCase__ : Union[str, Any] =f.read() except UnicodeDecodeError as e: raise ValueError(f'''Could not open {os.path.join(__lowerCamelCase , __lowerCamelCase )}.''' ) from e return _artifact def snake_case__ ( ): """simple docstring""" class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Tuple, lowerCamelCase : str )-> int: lowerCamelCase__ : Tuple =name lowerCamelCase__ : Optional[Any] =[] def __str__( self : int )-> Dict: return self.name def snake_case ( self : Dict, lowerCamelCase : str )-> Tuple: self.paths.append({'''name''': self.name, '''path''': path} ) lowerCamelCase__ : Dict[str, Artifact] ={} lowerCamelCase__ : List[str] =filter(os.path.isdir , os.listdir() ) for directory in directories: lowerCamelCase__ : List[str] =directory if artifact_name not in _available_artifacts: lowerCamelCase__ : List[str] =Artifact(__lowerCamelCase ) _available_artifacts[artifact_name].add_path(__lowerCamelCase ) return _available_artifacts if __name__ == "__main__": _lowercase : Optional[int] = get_job_links() _lowercase : Any = retrieve_available_artifacts() _lowercase : Tuple = collections.OrderedDict( [ ("*.py", "API Examples"), ("*.md", "MD Examples"), ] ) # This dict will contain all the information relative to each doc test category: # - failed: list of failed tests # - failures: dict in the format 'test': 'error_message' _lowercase : Optional[Any] = { v: { "failed": [], "failures": {}, } for v in docs.values() } # Link to the GitHub Action job _lowercase : Optional[Any] = github_actions_job_links.get("run_doctests") _lowercase : int = available_artifacts["doc_tests_gpu_test_reports"].paths[0] _lowercase : List[Any] = retrieve_artifact(artifact_path["name"]) if "stats" in artifact: _lowercase , _lowercase , _lowercase : int = handle_test_results(artifact["stats"]) _lowercase : List[Any] = failed _lowercase : Tuple = success _lowercase : Any = time_spent[1:-1] + ", " _lowercase : List[str] = 
extract_first_line_failure(artifact["failures_short"]) for line in artifact["summary_short"].split("\n"): if re.search("FAILED", line): _lowercase : Union[str, Any] = line.replace("FAILED ", "") _lowercase : Optional[Any] = line.split()[0].replace("\n", "") if "::" in line: _lowercase , _lowercase : Tuple = line.split("::") else: _lowercase , _lowercase : Any = line, line for file_regex in docs.keys(): if fnmatch(file_path, file_regex): _lowercase : List[str] = docs[file_regex] doc_test_results[category]["failed"].append(test) _lowercase : Any = all_failures[test] if test in all_failures else "N/A" _lowercase : Any = failure break _lowercase : Any = Message("🤗 Results of the doc tests.", doc_test_results) message.post() message.post_reply()
625
"""simple docstring""" def snake_case__ ( __lowerCamelCase : list[int] ): """simple docstring""" if not numbers: return 0 if not isinstance(__lowerCamelCase , (list, tuple) ) or not all( isinstance(__lowerCamelCase , __lowerCamelCase ) for number in numbers ): raise ValueError('''numbers must be an iterable of integers''' ) lowerCamelCase__ : Any =numbers[0] for i in range(1 , len(__lowerCamelCase ) ): # update the maximum and minimum subarray products lowerCamelCase__ : Dict =numbers[i] if number < 0: lowerCamelCase__ , lowerCamelCase__ : List[Any] =min_till_now, max_till_now lowerCamelCase__ : Optional[int] =max(__lowerCamelCase , max_till_now * number ) lowerCamelCase__ : Dict =min(__lowerCamelCase , min_till_now * number ) # update the maximum product found till now lowerCamelCase__ : Tuple =max(__lowerCamelCase , __lowerCamelCase ) return max_prod
625
1
"""simple docstring""" import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def snake_case__ ( __lowerCamelCase : List[Any] ): """simple docstring""" lowerCamelCase__ : Tuple =[ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''_float_tensor''', '''decoder.output_projection.weight''', ] for k in ignore_keys: state_dict.pop(__lowerCamelCase , __lowerCamelCase ) def snake_case__ ( __lowerCamelCase : Optional[Any] ): """simple docstring""" lowerCamelCase__ , lowerCamelCase__ : str =emb.weight.shape lowerCamelCase__ : List[Any] =nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] =emb.weight.data return lin_layer def snake_case__ ( __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any]="facebook/mbart-large-en-ro" , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : int=False ): """simple docstring""" lowerCamelCase__ : Optional[int] =torch.load(__lowerCamelCase , map_location='''cpu''' )['''model'''] remove_ignore_keys_(__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] =state_dict['''encoder.embed_tokens.weight'''].shape[0] lowerCamelCase__ : Dict =MBartConfig.from_pretrained(__lowerCamelCase , vocab_size=__lowerCamelCase ) if mbart_aa and finetuned: lowerCamelCase__ : List[str] ='''relu''' lowerCamelCase__ : List[str] =state_dict['''decoder.embed_tokens.weight'''] lowerCamelCase__ : List[Any] =MBartForConditionalGeneration(__lowerCamelCase ) model.model.load_state_dict(__lowerCamelCase ) if finetuned: lowerCamelCase__ : Optional[int] =make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": _lowercase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem." 
) parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--hf_config", default="facebook/mbart-large-cc25", type=str, help="Which huggingface architecture to use: mbart-large", ) parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint") parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint") _lowercase : Any = parser.parse_args() _lowercase : List[Any] = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
625
"""simple docstring""" from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, ) @flax.struct.dataclass class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' _a = 42 _a = 42 class __SCREAMING_SNAKE_CASE ( nn.Module ): '''simple docstring''' _a = 42 _a = (1_6, 3_2, 9_6, 2_5_6) _a = jnp.floataa def snake_case ( self : Tuple )-> int: lowerCamelCase__ : Tuple =nn.Conv( self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, ) lowerCamelCase__ : Dict =[] for i in range(len(self.block_out_channels ) - 1 ): lowerCamelCase__ : Dict =self.block_out_channels[i] lowerCamelCase__ : Dict =self.block_out_channels[i + 1] lowerCamelCase__ : List[str] =nn.Conv( lowerCamelCase, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, ) blocks.append(lowerCamelCase ) lowerCamelCase__ : Optional[int] =nn.Conv( lowerCamelCase, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, ) blocks.append(lowerCamelCase ) lowerCamelCase__ : Any =blocks lowerCamelCase__ : Optional[int] =nn.Conv( self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) def __call__( self : Any, lowerCamelCase : int )-> List[str]: lowerCamelCase__ : Tuple =self.conv_in(lowerCamelCase ) lowerCamelCase__ : Dict =nn.silu(lowerCamelCase ) for block in self.blocks: lowerCamelCase__ : str =block(lowerCamelCase ) lowerCamelCase__ : List[str] =nn.silu(lowerCamelCase ) lowerCamelCase__ : Any 
# NOTE(review): the two lines below are the tail of a conditioning-embedding
# __call__ whose definition starts outside this chunk; left byte-identical.
=self.conv_out(lowerCamelCase )
        return embedding


@flax_register_to_config
class __SCREAMING_SNAKE_CASE ( nn.Module , lowerCAmelCase_ , lowerCAmelCase_ ):
    r"""
    Flax ControlNet-style conditional UNet encoder.

    NOTE(review): identifiers in this block were machine-mangled (config
    attributes all renamed to `_a`, locals to `lowerCamelCase__`), so many
    names below no longer resolve at runtime. Comments describe the apparent
    intent (a diffusers `FlaxControlNetModel`) — confirm against the upstream
    source before relying on any of it.
    """

    # Config fields (order suggests: sample_size, in_channels, down_block_types,
    # only_cross_attention, block_out_channels, layers_per_block,
    # attention_head_dim, num_attention_heads, cross_attention_dim, dropout,
    # use_linear_projection, dtype, flip_sin_to_cos, freq_shift,
    # controlnet_conditioning_channel_order, conditioning_embedding_out_channels
    # — TODO confirm; mangling collapsed all names to `_a`).
    _a = 3_2
    _a = 4
    _a = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    _a = False
    _a = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
    _a = 2
    _a = 8
    _a = None
    _a = 1_2_8_0
    _a = 0.0
    _a = False
    _a = jnp.floataa  # presumably jnp.float32 before mangling — TODO confirm
    _a = True
    _a = 0
    _a = "rgb"
    _a = (1_6, 3_2, 9_6, 2_5_6)

    def snake_case ( self : str, lowerCamelCase : jax.random.KeyArray )-> FrozenDict:
        """Initialize model parameters from an RNG key using dummy inputs."""
        # init input tensors: a latent sample, a scalar timestep batch, an
        # encoder hidden-state placeholder and a full-resolution conditioning
        # image (8x the latent spatial size, matching the 8x VAE downscale).
        lowerCamelCase__ : int =(1, self.in_channels, self.sample_size, self.sample_size)
        lowerCamelCase__ : int =jnp.zeros(lowerCamelCase, dtype=jnp.floataa )
        lowerCamelCase__ : Union[str, Any] =jnp.ones((1,), dtype=jnp.intaa )
        lowerCamelCase__ : str =jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.floataa )
        lowerCamelCase__ : Any =(1, 3, self.sample_size * 8, self.sample_size * 8)
        lowerCamelCase__ : Optional[Any] =jnp.zeros(lowerCamelCase, dtype=jnp.floataa )
        # Split the key into separate streams for params and dropout.
        lowerCamelCase__ , lowerCamelCase__ : List[Any] =jax.random.split(lowerCamelCase )
        lowerCamelCase__ : Dict ={'''params''': params_rng, '''dropout''': dropout_rng}
        return self.init(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )["params"]

    def snake_case ( self : Any )-> Tuple:
        """Linen setup: build conv-in, time embedding, conditioning embedding,
        the down blocks and the per-resolution zero-initialized controlnet
        projection convs, plus the mid block and its projection."""
        lowerCamelCase__ : Optional[int] =self.block_out_channels
        lowerCamelCase__ : Tuple =block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most
        # models) it will default to `attention_head_dim`. This looks weird
        # upon first reading it and it is. The reason for this behavior is to
        # correct for incorrectly named variables that were introduced when
        # this library was created. The incorrect naming was only discovered
        # much later (see diffusers issue #2011). Changing
        # `attention_head_dim` to `num_attention_heads` for 40,000+
        # configurations is too backwards breaking, which is why we correct
        # for the naming here.
        lowerCamelCase__ : List[Any] =self.num_attention_heads or self.attention_head_dim

        # input projection conv (3x3, stride 1, SAME-style explicit padding)
        lowerCamelCase__ : int =nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype, )

        # time projection + embedding MLP
        lowerCamelCase__ : str =FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift )
        lowerCamelCase__ : Dict =FlaxTimestepEmbedding(lowerCamelCase, dtype=self.dtype )

        # embedding of the full-resolution conditioning image
        lowerCamelCase__ : List[Any] =FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels, )

        # Broadcast scalar settings to one entry per down block.
        lowerCamelCase__ : Dict =self.only_cross_attention
        if isinstance(lowerCamelCase, lowerCamelCase ):
            lowerCamelCase__ : int =(only_cross_attention,) * len(self.down_block_types )
        if isinstance(lowerCamelCase, lowerCamelCase ):
            lowerCamelCase__ : List[str] =(num_attention_heads,) * len(self.down_block_types )

        # down path: each block is paired with zero-initialized 1x1 convs so
        # the controlnet contributes nothing at the start of training.
        lowerCamelCase__ : Union[str, Any] =[]
        lowerCamelCase__ : Dict =[]
        lowerCamelCase__ : List[Any] =block_out_channels[0]
        lowerCamelCase__ : List[Any] =nn.Conv(
            lowerCamelCase,
            kernel_size=(1, 1),
            padding='''VALID''',
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype, )
        controlnet_down_blocks.append(lowerCamelCase )
        for i, down_block_type in enumerate(self.down_block_types ):
            lowerCamelCase__ : List[Any] =output_channel
            lowerCamelCase__ : str =block_out_channels[i]
            lowerCamelCase__ : Dict =i == len(lowerCamelCase ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                lowerCamelCase__ : str =FlaxCrossAttnDownBlockaD(
                    in_channels=lowerCamelCase,
                    out_channels=lowerCamelCase,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    dtype=self.dtype, )
            else:
                lowerCamelCase__ : List[Any] =FlaxDownBlockaD(
                    in_channels=lowerCamelCase,
                    out_channels=lowerCamelCase,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype, )
            down_blocks.append(lowerCamelCase )

            # one zero-conv per resnet layer of this block
            for _ in range(self.layers_per_block ):
                lowerCamelCase__ : Any =nn.Conv(
                    lowerCamelCase,
                    kernel_size=(1, 1),
                    padding='''VALID''',
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype, )
                controlnet_down_blocks.append(lowerCamelCase )
            # one extra zero-conv for the downsampler output
            if not is_final_block:
                lowerCamelCase__ : Any =nn.Conv(
                    lowerCamelCase,
                    kernel_size=(1, 1),
                    padding='''VALID''',
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype, )
                controlnet_down_blocks.append(lowerCamelCase )
        lowerCamelCase__ : int =down_blocks
        lowerCamelCase__ : List[str] =controlnet_down_blocks

        # mid block + its zero-initialized projection conv
        lowerCamelCase__ : Tuple =block_out_channels[-1]
        lowerCamelCase__ : List[Any] =FlaxUNetMidBlockaDCrossAttn(
            in_channels=lowerCamelCase,
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            dtype=self.dtype, )
        lowerCamelCase__ : List[str] =nn.Conv(
            lowerCamelCase,
            kernel_size=(1, 1),
            padding='''VALID''',
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype, )

    def __call__( self : int, lowerCamelCase : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : str, lowerCamelCase : float = 1.0, lowerCamelCase : bool = True, lowerCamelCase : bool = False, )-> Union[FlaxControlNetOutput, Tuple]:
        """Forward pass: returns the scaled per-resolution residuals and the
        mid-block residual, as a FlaxControlNetOutput or a plain tuple."""
        lowerCamelCase__ : int =self.controlnet_conditioning_channel_order
        # reverse the channel axis of the conditioning image for BGR inputs
        if channel_order == "bgr":
            lowerCamelCase__ : int =jnp.flip(lowerCamelCase, axis=1 )

        # 1. time — normalize timesteps to a 1-D array before projecting
        if not isinstance(lowerCamelCase, jnp.ndarray ):
            lowerCamelCase__ : Any =jnp.array([timesteps], dtype=jnp.intaa )
        elif isinstance(lowerCamelCase, jnp.ndarray ) and len(timesteps.shape ) == 0:
            lowerCamelCase__ : List[str] =timesteps.astype(dtype=jnp.floataa )
            lowerCamelCase__ : int =jnp.expand_dims(lowerCamelCase, 0 )
        lowerCamelCase__ : Optional[Any] =self.time_proj(lowerCamelCase )
        lowerCamelCase__ : Optional[Any] =self.time_embedding(lowerCamelCase )

        # 2. pre-process — NCHW -> NHWC for Flax convs, then add the embedded
        # conditioning image to the projected sample
        lowerCamelCase__ : Optional[int] =jnp.transpose(lowerCamelCase, (0, 2, 3, 1) )
        lowerCamelCase__ : Dict =self.conv_in(lowerCamelCase )
        lowerCamelCase__ : List[str] =jnp.transpose(lowerCamelCase, (0, 2, 3, 1) )
        lowerCamelCase__ : int =self.controlnet_cond_embedding(lowerCamelCase )
        sample += controlnet_cond

        # 3. down — collect residuals from every down block
        lowerCamelCase__ : Union[str, Any] =(sample,)
        for down_block in self.down_blocks:
            if isinstance(lowerCamelCase, lowerCamelCase ):
                lowerCamelCase__ , lowerCamelCase__ : Dict =down_block(lowerCamelCase, lowerCamelCase, lowerCamelCase, deterministic=not train )
            else:
                lowerCamelCase__ , lowerCamelCase__ : Tuple =down_block(lowerCamelCase, lowerCamelCase, deterministic=not train )
            down_block_res_samples += res_samples

        # 4. mid
        lowerCamelCase__ : Optional[int] =self.mid_block(lowerCamelCase, lowerCamelCase, lowerCamelCase, deterministic=not train )

        # 5. contronet blocks — project each residual through its zero-conv
        lowerCamelCase__ : Optional[Any] =()
        for down_block_res_sample, controlnet_block in zip(lowerCamelCase, self.controlnet_down_blocks ):
            lowerCamelCase__ : Union[str, Any] =controlnet_block(lowerCamelCase )
            controlnet_down_block_res_samples += (down_block_res_sample,)
        lowerCamelCase__ : List[str] =controlnet_down_block_res_samples
        lowerCamelCase__ : List[str] =self.controlnet_mid_block(lowerCamelCase )

        # 6. scaling — weight every residual by the conditioning scale
        lowerCamelCase__ : Union[str, Any] =[sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=lowerCamelCase, mid_block_res_sample=lowerCamelCase )
625
1
"""simple docstring""" import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version(">=", FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType _lowercase : int = get_logger(__name__) def snake_case__ ( __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any]=0 ): """simple docstring""" os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase ) with FSDP.state_dict_type( __lowerCamelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): lowerCamelCase__ : Union[str, Any] =model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: lowerCamelCase__ : List[str] =f'''{MODEL_NAME}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}.bin''' lowerCamelCase__ : Tuple =os.path.join(__lowerCamelCase , __lowerCamelCase ) if accelerator.process_index == 0: logger.info(f'''Saving model to {output_model_file}''' ) torch.save(__lowerCamelCase , __lowerCamelCase ) logger.info(f'''Model saved to {output_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: lowerCamelCase__ : Union[str, Any] =( f'''{MODEL_NAME}_rank{accelerator.process_index}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin''' ) lowerCamelCase__ : str =os.path.join(__lowerCamelCase , __lowerCamelCase ) logger.info(f'''Saving model to {output_model_file}''' ) 
torch.save(__lowerCamelCase , __lowerCamelCase ) logger.info(f'''Model saved to {output_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: lowerCamelCase__ : str =os.path.join(__lowerCamelCase , f'''{MODEL_NAME}_{model_index}''' ) os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase ) logger.info(f'''Saving model to {ckpt_dir}''' ) lowerCamelCase__ : List[Any] ={'''model''': state_dict} dist_cp.save_state_dict( state_dict=__lowerCamelCase , storage_writer=dist_cp.FileSystemWriter(__lowerCamelCase ) , planner=DefaultSavePlanner() , ) logger.info(f'''Model saved to {ckpt_dir}''' ) def snake_case__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : str=0 ): """simple docstring""" accelerator.wait_for_everyone() with FSDP.state_dict_type( __lowerCamelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(__lowerCamelCase ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( '''Set the `sync_module_states` flag to `True` so that model states are synced across processes when ''' '''initializing FSDP object''' ) return lowerCamelCase__ : List[str] =f'''{MODEL_NAME}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}.bin''' lowerCamelCase__ : Any =os.path.join(__lowerCamelCase , __lowerCamelCase ) logger.info(f'''Loading model from {input_model_file}''' ) lowerCamelCase__ : str =torch.load(__lowerCamelCase ) logger.info(f'''Model loaded from {input_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: lowerCamelCase__ : Tuple =( f'''{MODEL_NAME}_rank{accelerator.process_index}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin''' ) lowerCamelCase__ : Optional[Any] 
=os.path.join(__lowerCamelCase , __lowerCamelCase ) logger.info(f'''Loading model from {input_model_file}''' ) lowerCamelCase__ : int =torch.load(__lowerCamelCase ) logger.info(f'''Model loaded from {input_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: lowerCamelCase__ : List[Any] =( os.path.join(__lowerCamelCase , f'''{MODEL_NAME}_{model_index}''' ) if f'''{MODEL_NAME}''' not in input_dir else input_dir ) logger.info(f'''Loading model from {ckpt_dir}''' ) lowerCamelCase__ : int ={'''model''': model.state_dict()} dist_cp.load_state_dict( state_dict=__lowerCamelCase , storage_reader=dist_cp.FileSystemReader(__lowerCamelCase ) , planner=DefaultLoadPlanner() , ) lowerCamelCase__ : Union[str, Any] =state_dict['''model'''] logger.info(f'''Model loaded from {ckpt_dir}''' ) model.load_state_dict(__lowerCamelCase ) def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Dict=0 ): """simple docstring""" os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase ) with FSDP.state_dict_type( __lowerCamelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): lowerCamelCase__ : Optional[int] =FSDP.optim_state_dict(__lowerCamelCase , __lowerCamelCase ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: lowerCamelCase__ : int =( f'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else f'''{OPTIMIZER_NAME}_{optimizer_index}.bin''' ) lowerCamelCase__ : Optional[int] =os.path.join(__lowerCamelCase , __lowerCamelCase ) logger.info(f'''Saving Optimizer state to {output_optimizer_file}''' ) torch.save(__lowerCamelCase , __lowerCamelCase ) logger.info(f'''Optimizer state saved in {output_optimizer_file}''' ) else: lowerCamelCase__ : int =os.path.join(__lowerCamelCase , f'''{OPTIMIZER_NAME}_{optimizer_index}''' ) 
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase ) logger.info(f'''Saving Optimizer state to {ckpt_dir}''' ) dist_cp.save_state_dict( state_dict={'''optimizer''': optim_state} , storage_writer=dist_cp.FileSystemWriter(__lowerCamelCase ) , planner=DefaultSavePlanner() , ) logger.info(f'''Optimizer state saved in {ckpt_dir}''' ) def snake_case__ ( __lowerCamelCase : Any , __lowerCamelCase : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : Optional[Any]=0 ): """simple docstring""" accelerator.wait_for_everyone() with FSDP.state_dict_type( __lowerCamelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: lowerCamelCase__ : Tuple =None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: lowerCamelCase__ : Dict =( f'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else f'''{OPTIMIZER_NAME}_{optimizer_index}.bin''' ) lowerCamelCase__ : List[Any] =os.path.join(__lowerCamelCase , __lowerCamelCase ) logger.info(f'''Loading Optimizer state from {input_optimizer_file}''' ) lowerCamelCase__ : Optional[int] =torch.load(__lowerCamelCase ) logger.info(f'''Optimizer state loaded from {input_optimizer_file}''' ) else: lowerCamelCase__ : str =( os.path.join(__lowerCamelCase , f'''{OPTIMIZER_NAME}_{optimizer_index}''' ) if f'''{OPTIMIZER_NAME}''' not in input_dir else input_dir ) logger.info(f'''Loading Optimizer from {ckpt_dir}''' ) lowerCamelCase__ : Union[str, Any] =load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key='''optimizer''' , storage_reader=dist_cp.FileSystemReader(__lowerCamelCase ) , ) lowerCamelCase__ : str =optim_state['''optimizer'''] logger.info(f'''Optimizer loaded 
from {ckpt_dir}''' ) lowerCamelCase__ : List[str] =FSDP.optim_state_dict_to_load(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) optimizer.load_state_dict(__lowerCamelCase )
625
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) _lowercase : Optional[Any] = { "configuration_clip": [ "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "CLIPConfig", "CLIPOnnxConfig", "CLIPTextConfig", "CLIPVisionConfig", ], "processing_clip": ["CLIPProcessor"], "tokenization_clip": ["CLIPTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : str = ["CLIPTokenizerFast"] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Any = ["CLIPFeatureExtractor"] _lowercase : int = ["CLIPImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Optional[Any] = [ "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "CLIPModel", "CLIPPreTrainedModel", "CLIPTextModel", "CLIPTextModelWithProjection", "CLIPVisionModel", "CLIPVisionModelWithProjection", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Dict = [ "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "TFCLIPModel", "TFCLIPPreTrainedModel", "TFCLIPTextModel", "TFCLIPVisionModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Union[str, Any] = [ "FlaxCLIPModel", "FlaxCLIPPreTrainedModel", "FlaxCLIPTextModel", "FlaxCLIPTextPreTrainedModel", "FlaxCLIPVisionModel", "FlaxCLIPVisionPreTrainedModel", ] if TYPE_CHECKING: from .configuration_clip import ( CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPConfig, CLIPOnnxConfig, CLIPTextConfig, CLIPVisionConfig, ) from .processing_clip import CLIPProcessor from .tokenization_clip import CLIPTokenizer 
try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_clip_fast import CLIPTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clip import CLIPFeatureExtractor from .image_processing_clip import CLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clip import ( CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPModel, CLIPPreTrainedModel, CLIPTextModel, CLIPTextModelWithProjection, CLIPVisionModel, CLIPVisionModelWithProjection, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_clip import ( TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFCLIPModel, TFCLIPPreTrainedModel, TFCLIPTextModel, TFCLIPVisionModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_clip import ( FlaxCLIPModel, FlaxCLIPPreTrainedModel, FlaxCLIPTextModel, FlaxCLIPTextPreTrainedModel, FlaxCLIPVisionModel, FlaxCLIPVisionPreTrainedModel, ) else: import sys _lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
625
1
"""simple docstring""" import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : Tuple ): """simple docstring""" lowerCamelCase__ : Union[str, Any] =AutoConfig.from_pretrained(__lowerCamelCase ) lowerCamelCase__ : Any =FlaxAutoModelForSeqaSeqLM.from_config(config=__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] =checkpoints.load_tax_checkpoint(__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] ='''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp'''] if config.model_type == "t5": lowerCamelCase__ : List[str] ='''SelfAttention''' if config.model_type == "longt5" and config.encoder_attention_type == "local": lowerCamelCase__ : List[Any] ='''LocalSelfAttention''' elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowerCamelCase__ : Optional[Any] ='''TransientGlobalSelfAttention''' else: raise ValueError( '''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`''' ''' attribute with a value from [\'local\', \'transient-global].''' ) # Encoder for layer_index in range(config.num_layers ): lowerCamelCase__ : List[Any] =f'''layers_{str(__lowerCamelCase )}''' # Self-Attention lowerCamelCase__ : List[str] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel'''] lowerCamelCase__ : Optional[int] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel'''] lowerCamelCase__ : List[str] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel'''] lowerCamelCase__ : List[Any] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel'''] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowerCamelCase__ : str 
=tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale'''] # Layer Normalization lowerCamelCase__ : List[Any] =tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale'''] if split_mlp_wi: lowerCamelCase__ : Optional[Any] =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel'''] lowerCamelCase__ : Dict =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel'''] else: lowerCamelCase__ : List[str] =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel'''] lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel'''] # Layer Normalization lowerCamelCase__ : Tuple =tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale'''] # Assigning lowerCamelCase__ : str =flax_model.params['''encoder''']['''block'''][str(__lowerCamelCase )]['''layer'''] lowerCamelCase__ : int =tax_attention_key lowerCamelCase__ : Optional[int] =tax_attention_out lowerCamelCase__ : List[Any] =tax_attention_query lowerCamelCase__ : Optional[Any] =tax_attention_value lowerCamelCase__ : List[str] =tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowerCamelCase__ : Optional[int] =tax_global_layer_norm if split_mlp_wi: lowerCamelCase__ : Optional[int] =tax_mlp_wi_a lowerCamelCase__ : Optional[int] =tax_mlp_wi_a else: lowerCamelCase__ : Union[str, Any] =tax_mlp_wi lowerCamelCase__ : str =tax_mlp_wo lowerCamelCase__ : Optional[Any] =tax_mlp_layer_norm lowerCamelCase__ : Optional[int] =flax_model_encoder_layer_block # Only for layer 0: lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T lowerCamelCase__ : str =tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if 
config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowerCamelCase__ : Optional[int] =tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T lowerCamelCase__ : Optional[int] =tax_encoder_global_rel_embedding # Assigning lowerCamelCase__ : int =tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale'''] lowerCamelCase__ : List[Any] =tax_encoder_norm # Decoder for layer_index in range(config.num_layers ): lowerCamelCase__ : Dict =f'''layers_{str(__lowerCamelCase )}''' # Self-Attention lowerCamelCase__ : Dict =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel'''] lowerCamelCase__ : List[Any] =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel'''] lowerCamelCase__ : Optional[int] =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel'''] lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel'''] # Layer Normalization lowerCamelCase__ : int =tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][ '''scale''' ] # Encoder-Decoder-Attention lowerCamelCase__ : int =tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention'''] lowerCamelCase__ : List[Any] =tax_enc_dec_attention_module['''key''']['''kernel'''] lowerCamelCase__ : Any =tax_enc_dec_attention_module['''out''']['''kernel'''] lowerCamelCase__ : Dict =tax_enc_dec_attention_module['''query''']['''kernel'''] lowerCamelCase__ : List[str] =tax_enc_dec_attention_module['''value''']['''kernel'''] # Layer Normalization lowerCamelCase__ : Dict =tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale'''] # MLP if split_mlp_wi: lowerCamelCase__ : str =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel'''] 
lowerCamelCase__ : Any =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel'''] else: lowerCamelCase__ : List[Any] =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel'''] lowerCamelCase__ : Optional[Any] =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel'''] # Layer Normalization lowerCamelCase__ : str =tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale'''] # Assigning lowerCamelCase__ : str =flax_model.params['''decoder''']['''block'''][str(__lowerCamelCase )]['''layer'''] lowerCamelCase__ : Union[str, Any] =tax_attention_key lowerCamelCase__ : str =tax_attention_out lowerCamelCase__ : Optional[int] =tax_attention_query lowerCamelCase__ : Dict =tax_attention_value lowerCamelCase__ : List[str] =tax_pre_attention_layer_norm lowerCamelCase__ : List[Any] =tax_enc_dec_attention_key lowerCamelCase__ : Any =tax_enc_dec_attention_out lowerCamelCase__ : Any =tax_enc_dec_attention_query lowerCamelCase__ : Optional[int] =tax_enc_dec_attention_value lowerCamelCase__ : Dict =tax_cross_layer_norm if split_mlp_wi: lowerCamelCase__ : Tuple =tax_mlp_wi_a lowerCamelCase__ : int =tax_mlp_wi_a else: lowerCamelCase__ : List[Any] =tax_mlp_wi lowerCamelCase__ : Dict =tax_mlp_wo lowerCamelCase__ : Tuple =txa_mlp_layer_norm lowerCamelCase__ : Optional[Any] =flax_model_decoder_layer_block # Decoder Normalization lowerCamelCase__ : Dict =tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale'''] lowerCamelCase__ : int =txa_decoder_norm # Only for layer 0: lowerCamelCase__ : Tuple =tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T lowerCamelCase__ : Tuple =tax_decoder_rel_embedding # Token Embeddings lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''token_embedder''']['''embedding'''] lowerCamelCase__ : Dict =txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in 
tax_model["target"]["decoder"]: lowerCamelCase__ : int =tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel'''] flax_model.save_pretrained(__lowerCamelCase ) print('''T5X Model was sucessfully converted!''' ) if __name__ == "__main__": _lowercase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint." ) parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.") parser.add_argument( "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model." ) _lowercase : List[Any] = parser.parse_args() convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
625
"""simple docstring""" import os def snake_case__ ( ): """simple docstring""" with open(os.path.dirname(__lowerCamelCase ) + '''/p022_names.txt''' ) as file: lowerCamelCase__ : Tuple =str(file.readlines()[0] ) lowerCamelCase__ : int =names.replace('''"''' , '''''' ).split(''',''' ) names.sort() lowerCamelCase__ : Union[str, Any] =0 lowerCamelCase__ : str =0 for i, name in enumerate(__lowerCamelCase ): for letter in name: name_score += ord(__lowerCamelCase ) - 64 total_score += (i + 1) * name_score lowerCamelCase__ : Dict =0 return total_score if __name__ == "__main__": print(solution())
625
1
"""simple docstring""" import requests _lowercase : Optional[int] = "" # <-- Put your OpenWeatherMap appid here! _lowercase : int = "https://api.openweathermap.org/data/2.5/" def snake_case__ ( __lowerCamelCase : str = "Chicago" , __lowerCamelCase : str = APPID ): """simple docstring""" return requests.get(URL_BASE + '''weather''' , params=locals() ).json() def snake_case__ ( __lowerCamelCase : str = "Kolkata, India" , __lowerCamelCase : str = APPID ): """simple docstring""" return requests.get(URL_BASE + '''forecast''' , params=locals() ).json() def snake_case__ ( __lowerCamelCase : float = 55.68 , __lowerCamelCase : float = 12.57 , __lowerCamelCase : str = APPID ): """simple docstring""" return requests.get(URL_BASE + '''onecall''' , params=locals() ).json() if __name__ == "__main__": from pprint import pprint while True: _lowercase : List[str] = input("Enter a location:").strip() if location: pprint(current_weather(location)) else: break
625
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : str, lowerCamelCase : int )-> None: lowerCamelCase__ : str =value lowerCamelCase__ : Node | None =None lowerCamelCase__ : Node | None =None class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : int, lowerCamelCase : Node )-> None: lowerCamelCase__ : Any =tree def snake_case ( self : str, lowerCamelCase : Node | None )-> int: if node is None: return 0 return node.value + ( self.depth_first_search(node.left ) + self.depth_first_search(node.right ) ) def __iter__( self : Dict )-> Iterator[int]: yield self.depth_first_search(self.tree ) if __name__ == "__main__": import doctest doctest.testmod()
625
1
"""simple docstring""" from __future__ import annotations def snake_case__ ( __lowerCamelCase : list ): """simple docstring""" if len(__lowerCamelCase ) == 0: return [] lowerCamelCase__ , lowerCamelCase__ : int =min(__lowerCamelCase ), max(__lowerCamelCase ) lowerCamelCase__ : Optional[int] =int(max_value - min_value ) + 1 lowerCamelCase__ : list[list] =[[] for _ in range(__lowerCamelCase )] for i in my_list: buckets[int(i - min_value )].append(__lowerCamelCase ) return [v for bucket in buckets for v in sorted(__lowerCamelCase )] if __name__ == "__main__": from doctest import testmod testmod() assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
625
"""simple docstring""" import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel _lowercase : List[str] = logging.getLogger(__name__) def snake_case__ ( __lowerCamelCase : Any , __lowerCamelCase : str ): """simple docstring""" # save results if os.path.exists(__lowerCamelCase ): if os.path.exists(os.path.join(__lowerCamelCase , '''config.json''' ) ) and os.path.isfile( os.path.join(__lowerCamelCase , '''config.json''' ) ): os.remove(os.path.join(__lowerCamelCase , '''config.json''' ) ) if os.path.exists(os.path.join(__lowerCamelCase , '''pytorch_model.bin''' ) ) and os.path.isfile( os.path.join(__lowerCamelCase , '''pytorch_model.bin''' ) ): os.remove(os.path.join(__lowerCamelCase , '''pytorch_model.bin''' ) ) else: os.makedirs(__lowerCamelCase ) model.save_pretrained(__lowerCamelCase ) def snake_case__ ( __lowerCamelCase : Tuple , __lowerCamelCase : Dict=False ): """simple docstring""" lowerCamelCase__ : Union[str, Any] =2 if unlogit: lowerCamelCase__ : Any =torch.pow(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : List[str] =p * torch.log(__lowerCamelCase ) lowerCamelCase__ : Tuple =0 return -plogp.sum(dim=-1 ) def snake_case__ ( __lowerCamelCase : Any ): """simple docstring""" logger.info('''lv, h >\t''' + '''\t'''.join(f'''{x + 1}''' for x in range(len(__lowerCamelCase ) ) ) ) for row in range(len(__lowerCamelCase ) ): if tensor.dtype != torch.long: logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) ) else: logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:d}''' for x in tensor[row].cpu().data ) ) def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : 
List[str]=None , __lowerCamelCase : Tuple=False ): """simple docstring""" lowerCamelCase__ , lowerCamelCase__ : Tuple =model.config.num_hidden_layers, model.config.num_attention_heads lowerCamelCase__ : Optional[Any] =torch.zeros(__lowerCamelCase , __lowerCamelCase ).to(args.device ) lowerCamelCase__ : Optional[Any] =torch.zeros(__lowerCamelCase , __lowerCamelCase ).to(args.device ) if head_mask is None: lowerCamelCase__ : List[Any] =torch.ones(__lowerCamelCase , __lowerCamelCase ).to(args.device ) head_mask.requires_grad_(requires_grad=__lowerCamelCase ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: lowerCamelCase__ : Union[str, Any] =None lowerCamelCase__ : List[str] =0.0 lowerCamelCase__ : Union[str, Any] =0.0 for step, inputs in enumerate(tqdm(__lowerCamelCase , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ): lowerCamelCase__ : Any =tuple(t.to(args.device ) for t in inputs ) ((lowerCamelCase__) , ) : Any =inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) lowerCamelCase__ : Dict =model(__lowerCamelCase , labels=__lowerCamelCase , head_mask=__lowerCamelCase ) # (loss), lm_logits, presents, (all hidden_states), (attentions) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple =( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(__lowerCamelCase ): lowerCamelCase__ : Any =entropy(attn.detach() , __lowerCamelCase ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(__lowerCamelCase ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise 
importance normalization if not args.dont_normalize_importance_by_layer: lowerCamelCase__ : int =2 lowerCamelCase__ : List[str] =torch.pow(torch.pow(__lowerCamelCase , __lowerCamelCase ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-2_0 if not args.dont_normalize_global_importance: lowerCamelCase__ : int =(head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info('''Attention entropies''' ) print_ad_tensor(__lowerCamelCase ) if compute_importance: logger.info('''Head importance scores''' ) print_ad_tensor(__lowerCamelCase ) logger.info('''Head ranked by importance scores''' ) lowerCamelCase__ : Optional[int] =torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) lowerCamelCase__ : Dict =torch.arange( head_importance.numel() , device=args.device ) lowerCamelCase__ : Any =head_ranks.view_as(__lowerCamelCase ) print_ad_tensor(__lowerCamelCase ) return attn_entropy, head_importance, total_loss def snake_case__ ( __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : int ): """simple docstring""" lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict =compute_heads_importance(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase ) lowerCamelCase__ : int =1 / loss # instead of downsteam score use the LM loss logger.info('''Pruning: original score: %f, threshold: %f''' , __lowerCamelCase , original_score * args.masking_threshold ) lowerCamelCase__ : Dict =torch.ones_like(__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] =max(1 , int(new_head_mask.numel() * args.masking_amount ) ) lowerCamelCase__ : List[Any] =original_score while current_score >= original_score * args.masking_threshold: lowerCamelCase__ : List[Any] =new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads lowerCamelCase__ : int 
=float('''Inf''' ) lowerCamelCase__ : Union[str, Any] =head_importance.view(-1 ).sort()[1] if len(__lowerCamelCase ) <= num_to_mask: print('''BREAK BY num_to_mask''' ) break # mask heads lowerCamelCase__ : List[str] =current_heads_to_mask[:num_to_mask] logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) ) lowerCamelCase__ : Optional[int] =new_head_mask.view(-1 ) lowerCamelCase__ : Optional[Any] =0.0 lowerCamelCase__ : Dict =new_head_mask.view_as(__lowerCamelCase ) lowerCamelCase__ : Tuple =new_head_mask.clone().detach() print_ad_tensor(__lowerCamelCase ) # Compute metric and head importance again lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =compute_heads_importance( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase , head_mask=__lowerCamelCase ) lowerCamelCase__ : Any =1 / loss logger.info( '''Masking: current score: %f, remaining heads %d (%.1f percents)''' , __lowerCamelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , ) logger.info('''Final head mask''' ) print_ad_tensor(__lowerCamelCase ) np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() ) return head_mask def snake_case__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[Any] ): """simple docstring""" lowerCamelCase__ : str =datetime.now() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] =compute_heads_importance( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase , compute_importance=__lowerCamelCase , head_mask=__lowerCamelCase ) lowerCamelCase__ : Tuple =1 / loss lowerCamelCase__ : Optional[Any] =datetime.now() - before_time lowerCamelCase__ : int =sum(p.numel() for p in model.parameters() ) lowerCamelCase__ : Any ={ layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in 
range(len(__lowerCamelCase ) ) } for k, v in heads_to_prune.items(): if isinstance(__lowerCamelCase , __lowerCamelCase ): lowerCamelCase__ : Optional[int] =[ v, ] assert sum(len(__lowerCamelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(__lowerCamelCase ) lowerCamelCase__ : List[str] =sum(p.numel() for p in model.parameters() ) lowerCamelCase__ : Any =datetime.now() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict =compute_heads_importance( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase , compute_importance=__lowerCamelCase , head_mask=__lowerCamelCase , actually_pruned=__lowerCamelCase , ) lowerCamelCase__ : str =1 / loss lowerCamelCase__ : Union[str, Any] =datetime.now() - before_time logger.info( '''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , __lowerCamelCase , __lowerCamelCase , pruned_num_params / original_num_params * 100 , ) logger.info('''Pruning: score with masking: %f score with pruning: %f''' , __lowerCamelCase , __lowerCamelCase ) logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 100 ) save_model(__lowerCamelCase , args.output_dir ) def snake_case__ ( ): """simple docstring""" lowerCamelCase__ : Optional[int] =argparse.ArgumentParser() # Required parameters parser.add_argument( '''--data_dir''' , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help='''The input data dir. 
Should contain the .tsv files (or other data files) for the task.''' , ) parser.add_argument( '''--model_name_or_path''' , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--output_dir''' , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help='''The output directory where the model predictions and checkpoints will be written.''' , ) # Other parameters parser.add_argument( '''--config_name''' , default='''''' , type=__lowerCamelCase , help='''Pretrained config name or path if not the same as model_name_or_path''' , ) parser.add_argument( '''--tokenizer_name''' , default='''''' , type=__lowerCamelCase , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , ) parser.add_argument( '''--cache_dir''' , default=__lowerCamelCase , type=__lowerCamelCase , help='''Where do you want to store the pre-trained models downloaded from s3''' , ) parser.add_argument( '''--data_subset''' , type=__lowerCamelCase , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' ) parser.add_argument( '''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' ) parser.add_argument( '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' ) parser.add_argument( '''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' ) parser.add_argument( '''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , ) parser.add_argument( '''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' ) parser.add_argument( '''--masking_threshold''' , default=0.9 , 
type=__lowerCamelCase , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , ) parser.add_argument( '''--masking_amount''' , default=0.1 , type=__lowerCamelCase , help='''Amount to heads to masking at each masking step.''' ) parser.add_argument('''--metric_name''' , default='''acc''' , type=__lowerCamelCase , help='''Metric to use for head masking.''' ) parser.add_argument( '''--max_seq_length''' , default=128 , type=__lowerCamelCase , help=( '''The maximum total input sequence length after WordPiece tokenization. \n''' '''Sequences longer than this will be truncated, sequences shorter padded.''' ) , ) parser.add_argument('''--batch_size''' , default=1 , type=__lowerCamelCase , help='''Batch size.''' ) parser.add_argument('''--seed''' , type=__lowerCamelCase , default=42 ) parser.add_argument('''--local_rank''' , type=__lowerCamelCase , default=-1 , help='''local_rank for distributed training on gpus''' ) parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' ) parser.add_argument('''--server_ip''' , type=__lowerCamelCase , default='''''' , help='''Can be used for distant debugging.''' ) parser.add_argument('''--server_port''' , type=__lowerCamelCase , default='''''' , help='''Can be used for distant debugging.''' ) lowerCamelCase__ : List[Any] =parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('''Waiting for debugger attach''' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowerCamelCase ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: lowerCamelCase__ : Dict =torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' ) lowerCamelCase__ : Dict =0 if args.no_cuda else 
torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) lowerCamelCase__ : str =torch.device('''cuda''' , args.local_rank ) lowerCamelCase__ : Any =1 torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) lowerCamelCase__ : Union[str, Any] =GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: lowerCamelCase__ : List[Any] =nn.parallel.DistributedDataParallel( __lowerCamelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__lowerCamelCase ) elif args.n_gpu > 1: lowerCamelCase__ : int =nn.DataParallel(__lowerCamelCase ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=__lowerCamelCase ) torch.save(__lowerCamelCase , os.path.join(args.output_dir , '''run_args.bin''' ) ) logger.info('''Training/evaluation parameters %s''' , __lowerCamelCase ) # Prepare dataset lowerCamelCase__ : Union[str, Any] =np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) lowerCamelCase__ : Any =(torch.from_numpy(__lowerCamelCase ),) lowerCamelCase__ : List[Any] =TensorDataset(*__lowerCamelCase ) lowerCamelCase__ : List[str] =RandomSampler(__lowerCamelCase ) lowerCamelCase__ : Dict =DataLoader(__lowerCamelCase , sampler=__lowerCamelCase , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: lowerCamelCase__ : 
Optional[int] =mask_heads(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) prune_heads(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if __name__ == "__main__": main()
625
1
"""simple docstring""" import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin _lowercase : List[str] = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right _lowercase : List[str] = 2_5_0_0_0_4 _lowercase : Optional[Any] = 2_5_0_0_2_0 @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' _a = MBartTokenizer _a = MBartTokenizerFast _a = True _a = True def snake_case ( self : Tuple )-> Union[str, Any]: super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase__ : Union[str, Any] =MBartTokenizer(lowerCamelCase, keep_accents=lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case ( self : Dict )-> Union[str, Any]: lowerCamelCase__ : Any =MBartTokenizer(lowerCamelCase, keep_accents=lowerCamelCase ) lowerCamelCase__ : List[Any] =tokenizer.tokenize('''This is a test''' ) self.assertListEqual(lowerCamelCase, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCamelCase ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], ) lowerCamelCase__ : str =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowerCamelCase, [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', 
'''al''', '''s''', '''é''', '''.''', ], ) lowerCamelCase__ : Union[str, Any] =tokenizer.convert_tokens_to_ids(lowerCamelCase ) self.assertListEqual( lowerCamelCase, [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ], ) lowerCamelCase__ : str =tokenizer.convert_ids_to_tokens(lowerCamelCase ) self.assertListEqual( lowerCamelCase, [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ], ) def snake_case ( self : Tuple )-> List[Any]: if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return lowerCamelCase__ : int =(self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowerCamelCase__ : Union[str, Any] =self.rust_tokenizer_class.from_pretrained(lowerCamelCase, **lowerCamelCase ) lowerCamelCase__ : str =self.tokenizer_class.from_pretrained(lowerCamelCase, **lowerCamelCase ) lowerCamelCase__ : List[str] =tempfile.mkdtemp() lowerCamelCase__ : Union[str, Any] =tokenizer_r.save_pretrained(lowerCamelCase ) lowerCamelCase__ : Optional[int] =tokenizer_p.save_pretrained(lowerCamelCase ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) lowerCamelCase__ : List[str] =tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(lowerCamelCase, lowerCamelCase ) # Checks 
everything loads correctly in the same way lowerCamelCase__ : Any =tokenizer_r.from_pretrained(lowerCamelCase ) lowerCamelCase__ : Dict =tokenizer_p.from_pretrained(lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(lowerCamelCase ) # Save tokenizer rust, legacy_format=True lowerCamelCase__ : Dict =tempfile.mkdtemp() lowerCamelCase__ : List[str] =tokenizer_r.save_pretrained(lowerCamelCase, legacy_format=lowerCamelCase ) lowerCamelCase__ : Tuple =tokenizer_p.save_pretrained(lowerCamelCase ) # Checks it save with the same files self.assertSequenceEqual(lowerCamelCase, lowerCamelCase ) # Checks everything loads correctly in the same way lowerCamelCase__ : Optional[int] =tokenizer_r.from_pretrained(lowerCamelCase ) lowerCamelCase__ : Any =tokenizer_p.from_pretrained(lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) ) shutil.rmtree(lowerCamelCase ) # Save tokenizer rust, legacy_format=False lowerCamelCase__ : Optional[int] =tempfile.mkdtemp() lowerCamelCase__ : int =tokenizer_r.save_pretrained(lowerCamelCase, legacy_format=lowerCamelCase ) lowerCamelCase__ : Dict =tokenizer_p.save_pretrained(lowerCamelCase ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way lowerCamelCase__ : Dict =tokenizer_r.from_pretrained(lowerCamelCase ) lowerCamelCase__ : int =tokenizer_p.from_pretrained(lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: 
self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) ) shutil.rmtree(lowerCamelCase ) @require_torch @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' _a = 'facebook/mbart-large-en-ro' _a = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.', ] _a = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei' ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] _a = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE] @classmethod def snake_case ( cls : List[Any] )-> Optional[int]: lowerCamelCase__ : MBartTokenizer =MBartTokenizer.from_pretrained( cls.checkpoint_name, src_lang='''en_XX''', tgt_lang='''ro_RO''' ) lowerCamelCase__ : Optional[int] =1 return cls def snake_case ( self : Optional[Any] )-> List[str]: self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''], 25_0001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''], 25_0004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''], 25_0020 ) def snake_case ( self : Optional[int] )-> List[Any]: lowerCamelCase__ : Union[str, Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens, lowerCamelCase ) def snake_case ( self : Optional[Any] )-> str: self.assertIn(lowerCamelCase, self.tokenizer.all_special_ids ) lowerCamelCase__ : Optional[int] =[RO_CODE, 884, 9019, 96, 9, 916, 
8_6792, 36, 1_8743, 1_5596, 5, 2] lowerCamelCase__ : Any =self.tokenizer.decode(lowerCamelCase, skip_special_tokens=lowerCamelCase ) lowerCamelCase__ : str =self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowerCamelCase ) self.assertEqual(lowerCamelCase, lowerCamelCase ) self.assertNotIn(self.tokenizer.eos_token, lowerCamelCase ) def snake_case ( self : Tuple )-> int: lowerCamelCase__ : Optional[int] =['''this is gunna be a long sentence ''' * 20] assert isinstance(src_text[0], lowerCamelCase ) lowerCamelCase__ : Dict =10 lowerCamelCase__ : Optional[int] =self.tokenizer(lowerCamelCase, max_length=lowerCamelCase, truncation=lowerCamelCase ).input_ids[0] self.assertEqual(ids[-2], 2 ) self.assertEqual(ids[-1], lowerCamelCase ) self.assertEqual(len(lowerCamelCase ), lowerCamelCase ) def snake_case ( self : int )-> Any: self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ), [25_0026, 25_0001] ) def snake_case ( self : Tuple )-> Optional[Any]: lowerCamelCase__ : int =tempfile.mkdtemp() lowerCamelCase__ : Optional[int] =self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(lowerCamelCase ) lowerCamelCase__ : Optional[Any] =MBartTokenizer.from_pretrained(lowerCamelCase ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids, lowerCamelCase ) @require_torch def snake_case ( self : Optional[Any] )-> Tuple: lowerCamelCase__ : Optional[Any] =self.tokenizer(self.src_text, text_target=self.tgt_text, padding=lowerCamelCase, return_tensors='''pt''' ) lowerCamelCase__ : Dict =shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def snake_case ( self : Optional[Any] )-> Any: lowerCamelCase__ : str 
=self.tokenizer( self.src_text, text_target=self.tgt_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=len(self.expected_src_tokens ), return_tensors='''pt''', ) lowerCamelCase__ : List[Any] =shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id ) self.assertIsInstance(lowerCamelCase, lowerCamelCase ) self.assertEqual((2, 14), batch.input_ids.shape ) self.assertEqual((2, 14), batch.attention_mask.shape ) lowerCamelCase__ : Any =batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens, lowerCamelCase ) self.assertEqual(2, batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens, [] ) self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE] ) def snake_case ( self : List[Any] )-> Dict: lowerCamelCase__ : Any =self.tokenizer(self.src_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=3, return_tensors='''pt''' ) lowerCamelCase__ : Tuple =self.tokenizer( text_target=self.tgt_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=10, return_tensors='''pt''' ) lowerCamelCase__ : Union[str, Any] =targets['''input_ids'''] lowerCamelCase__ : List[Any] =shift_tokens_right(lowerCamelCase, self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1], 3 ) self.assertEqual(batch.decoder_input_ids.shape[1], 10 ) @require_torch def snake_case ( self : Optional[int] )-> List[Any]: lowerCamelCase__ : str =self.tokenizer._build_translation_inputs( '''A test''', return_tensors='''pt''', src_lang='''en_XX''', tgt_lang='''ar_AR''' ) self.assertEqual( nested_simplify(lowerCamelCase ), { # A, test, EOS, en_XX '''input_ids''': [[62, 3034, 2, 25_0004]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 25_0001, }, )
625
"""simple docstring""" import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : Tuple ): """simple docstring""" lowerCamelCase__ : Union[str, Any] =AutoConfig.from_pretrained(__lowerCamelCase ) lowerCamelCase__ : Any =FlaxAutoModelForSeqaSeqLM.from_config(config=__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] =checkpoints.load_tax_checkpoint(__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] ='''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp'''] if config.model_type == "t5": lowerCamelCase__ : List[str] ='''SelfAttention''' if config.model_type == "longt5" and config.encoder_attention_type == "local": lowerCamelCase__ : List[Any] ='''LocalSelfAttention''' elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowerCamelCase__ : Optional[Any] ='''TransientGlobalSelfAttention''' else: raise ValueError( '''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`''' ''' attribute with a value from [\'local\', \'transient-global].''' ) # Encoder for layer_index in range(config.num_layers ): lowerCamelCase__ : List[Any] =f'''layers_{str(__lowerCamelCase )}''' # Self-Attention lowerCamelCase__ : List[str] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel'''] lowerCamelCase__ : Optional[int] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel'''] lowerCamelCase__ : List[str] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel'''] lowerCamelCase__ : List[Any] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel'''] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowerCamelCase__ : str 
=tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale'''] # Layer Normalization lowerCamelCase__ : List[Any] =tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale'''] if split_mlp_wi: lowerCamelCase__ : Optional[Any] =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel'''] lowerCamelCase__ : Dict =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel'''] else: lowerCamelCase__ : List[str] =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel'''] lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel'''] # Layer Normalization lowerCamelCase__ : Tuple =tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale'''] # Assigning lowerCamelCase__ : str =flax_model.params['''encoder''']['''block'''][str(__lowerCamelCase )]['''layer'''] lowerCamelCase__ : int =tax_attention_key lowerCamelCase__ : Optional[int] =tax_attention_out lowerCamelCase__ : List[Any] =tax_attention_query lowerCamelCase__ : Optional[Any] =tax_attention_value lowerCamelCase__ : List[str] =tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowerCamelCase__ : Optional[int] =tax_global_layer_norm if split_mlp_wi: lowerCamelCase__ : Optional[int] =tax_mlp_wi_a lowerCamelCase__ : Optional[int] =tax_mlp_wi_a else: lowerCamelCase__ : Union[str, Any] =tax_mlp_wi lowerCamelCase__ : str =tax_mlp_wo lowerCamelCase__ : Optional[Any] =tax_mlp_layer_norm lowerCamelCase__ : Optional[int] =flax_model_encoder_layer_block # Only for layer 0: lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T lowerCamelCase__ : str =tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if 
config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowerCamelCase__ : Optional[int] =tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T lowerCamelCase__ : Optional[int] =tax_encoder_global_rel_embedding # Assigning lowerCamelCase__ : int =tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale'''] lowerCamelCase__ : List[Any] =tax_encoder_norm # Decoder for layer_index in range(config.num_layers ): lowerCamelCase__ : Dict =f'''layers_{str(__lowerCamelCase )}''' # Self-Attention lowerCamelCase__ : Dict =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel'''] lowerCamelCase__ : List[Any] =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel'''] lowerCamelCase__ : Optional[int] =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel'''] lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel'''] # Layer Normalization lowerCamelCase__ : int =tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][ '''scale''' ] # Encoder-Decoder-Attention lowerCamelCase__ : int =tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention'''] lowerCamelCase__ : List[Any] =tax_enc_dec_attention_module['''key''']['''kernel'''] lowerCamelCase__ : Any =tax_enc_dec_attention_module['''out''']['''kernel'''] lowerCamelCase__ : Dict =tax_enc_dec_attention_module['''query''']['''kernel'''] lowerCamelCase__ : List[str] =tax_enc_dec_attention_module['''value''']['''kernel'''] # Layer Normalization lowerCamelCase__ : Dict =tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale'''] # MLP if split_mlp_wi: lowerCamelCase__ : str =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel'''] 
lowerCamelCase__ : Any =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel'''] else: lowerCamelCase__ : List[Any] =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel'''] lowerCamelCase__ : Optional[Any] =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel'''] # Layer Normalization lowerCamelCase__ : str =tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale'''] # Assigning lowerCamelCase__ : str =flax_model.params['''decoder''']['''block'''][str(__lowerCamelCase )]['''layer'''] lowerCamelCase__ : Union[str, Any] =tax_attention_key lowerCamelCase__ : str =tax_attention_out lowerCamelCase__ : Optional[int] =tax_attention_query lowerCamelCase__ : Dict =tax_attention_value lowerCamelCase__ : List[str] =tax_pre_attention_layer_norm lowerCamelCase__ : List[Any] =tax_enc_dec_attention_key lowerCamelCase__ : Any =tax_enc_dec_attention_out lowerCamelCase__ : Any =tax_enc_dec_attention_query lowerCamelCase__ : Optional[int] =tax_enc_dec_attention_value lowerCamelCase__ : Dict =tax_cross_layer_norm if split_mlp_wi: lowerCamelCase__ : Tuple =tax_mlp_wi_a lowerCamelCase__ : int =tax_mlp_wi_a else: lowerCamelCase__ : List[Any] =tax_mlp_wi lowerCamelCase__ : Dict =tax_mlp_wo lowerCamelCase__ : Tuple =txa_mlp_layer_norm lowerCamelCase__ : Optional[Any] =flax_model_decoder_layer_block # Decoder Normalization lowerCamelCase__ : Dict =tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale'''] lowerCamelCase__ : int =txa_decoder_norm # Only for layer 0: lowerCamelCase__ : Tuple =tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T lowerCamelCase__ : Tuple =tax_decoder_rel_embedding # Token Embeddings lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''token_embedder''']['''embedding'''] lowerCamelCase__ : Dict =txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in 
tax_model["target"]["decoder"]: lowerCamelCase__ : int =tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel'''] flax_model.save_pretrained(__lowerCamelCase ) print('''T5X Model was sucessfully converted!''' ) if __name__ == "__main__": _lowercase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint." ) parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.") parser.add_argument( "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model." ) _lowercase : List[Any] = parser.parse_args() convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
625
1
"""simple docstring""" import os import zipfile import requests from get_ci_error_statistics import download_artifact, get_artifacts_links def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : Optional[int]=7 ): """simple docstring""" lowerCamelCase__ : List[Any] =None if token is not None: lowerCamelCase__ : Union[str, Any] ={'''Accept''': '''application/vnd.github+json''', '''Authorization''': f'''Bearer {token}'''} # The id of a workflow (not of a workflow run) lowerCamelCase__ : Optional[int] ='''636036''' lowerCamelCase__ : Union[str, Any] =f'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs''' # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results url += f'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}''' lowerCamelCase__ : str =requests.get(__lowerCamelCase , headers=__lowerCamelCase ).json() return result["workflow_runs"] def snake_case__ ( __lowerCamelCase : Optional[int] ): """simple docstring""" lowerCamelCase__ : str =get_daily_ci_runs(__lowerCamelCase ) lowerCamelCase__ : Optional[Any] =None for workflow_run in workflow_runs: if workflow_run["status"] == "completed": lowerCamelCase__ : Union[str, Any] =workflow_run['''id'''] break return workflow_run_id def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple ): """simple docstring""" lowerCamelCase__ : List[str] =get_last_daily_ci_runs(__lowerCamelCase ) if workflow_run_id is not None: lowerCamelCase__ : Optional[Any] =get_artifacts_links(worflow_run_id=__lowerCamelCase , token=__lowerCamelCase ) for artifact_name in artifact_names: if artifact_name in artifacts_links: lowerCamelCase__ : Union[str, Any] =artifacts_links[artifact_name] download_artifact( artifact_name=__lowerCamelCase , artifact_url=__lowerCamelCase , output_dir=__lowerCamelCase , token=__lowerCamelCase ) def snake_case__ ( __lowerCamelCase : Dict , __lowerCamelCase : Any , 
__lowerCamelCase : Dict ): """simple docstring""" get_last_daily_ci_artifacts(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : Union[str, Any] ={} for artifact_name in artifact_names: lowerCamelCase__ : Any =os.path.join(__lowerCamelCase , f'''{artifact_name}.zip''' ) if os.path.isfile(__lowerCamelCase ): lowerCamelCase__ : List[str] ={} with zipfile.ZipFile(__lowerCamelCase ) as z: for filename in z.namelist(): if not os.path.isdir(__lowerCamelCase ): # read the file with z.open(__lowerCamelCase ) as f: lowerCamelCase__ : Dict =f.read().decode('''UTF-8''' ) return results
625
"""simple docstring""" import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Optional[Any], lowerCamelCase : Tuple, lowerCamelCase : List[str]=13, lowerCamelCase : List[Any]=32, lowerCamelCase : Dict=3, lowerCamelCase : int=4, lowerCamelCase : str=[10, 20, 30, 40], lowerCamelCase : Any=[2, 2, 3, 2], lowerCamelCase : int=True, lowerCamelCase : int=True, lowerCamelCase : str=37, lowerCamelCase : Optional[int]="gelu", lowerCamelCase : Optional[int]=10, lowerCamelCase : Any=0.02, lowerCamelCase : Union[str, Any]=["stage2", "stage3", "stage4"], lowerCamelCase : Optional[int]=3, lowerCamelCase : Tuple=None, )-> List[str]: lowerCamelCase__ : List[str] =parent lowerCamelCase__ : Tuple =batch_size lowerCamelCase__ : str =image_size lowerCamelCase__ : Any =num_channels lowerCamelCase__ : Tuple =num_stages lowerCamelCase__ : List[str] =hidden_sizes lowerCamelCase__ : Any =depths lowerCamelCase__ : Union[str, Any] =is_training lowerCamelCase__ : Tuple =use_labels lowerCamelCase__ : int =intermediate_size lowerCamelCase__ : Optional[int] =hidden_act lowerCamelCase__ : Dict =type_sequence_label_size lowerCamelCase__ : Tuple =initializer_range lowerCamelCase__ : Any 
=out_features lowerCamelCase__ : Tuple =num_labels lowerCamelCase__ : Optional[int] =scope lowerCamelCase__ : Optional[int] =num_stages def snake_case ( self : str )-> Optional[int]: lowerCamelCase__ : str =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ : Tuple =None if self.use_labels: lowerCamelCase__ : Union[str, Any] =ids_tensor([self.batch_size], self.type_sequence_label_size ) lowerCamelCase__ : int =self.get_config() return config, pixel_values, labels def snake_case ( self : Union[str, Any] )-> Any: return ConvNextConfig( num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, ) def snake_case ( self : Union[str, Any] )-> Any: return UperNetConfig( backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=lowerCamelCase, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=lowerCamelCase, loss_ignore_index=255, num_labels=self.num_labels, ) def snake_case ( self : int, lowerCamelCase : str, lowerCamelCase : List[str], lowerCamelCase : List[Any] )-> Tuple: lowerCamelCase__ : List[str] =UperNetForSemanticSegmentation(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() lowerCamelCase__ : int =model(lowerCamelCase ) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def snake_case ( self : Any )-> Tuple: lowerCamelCase__ : Dict =self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) : Any =config_and_inputs lowerCamelCase__ : Optional[int] ={'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ 
, unittest.TestCase ): '''simple docstring''' _a = (UperNetForSemanticSegmentation,) if is_torch_available() else () _a = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {} _a = False _a = False _a = False _a = False _a = False _a = False def snake_case ( self : Optional[int] )-> Optional[int]: lowerCamelCase__ : Optional[Any] =UperNetModelTester(self ) lowerCamelCase__ : Union[str, Any] =ConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase, hidden_size=37 ) def snake_case ( self : Optional[int] )-> Optional[int]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def snake_case ( self : List[str] )-> Dict: return def snake_case ( self : Optional[int] )-> List[str]: lowerCamelCase__ , lowerCamelCase__ : str =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Union[str, Any] =model_class(lowerCamelCase ) lowerCamelCase__ : Tuple =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ : Tuple =[*signature.parameters.keys()] lowerCamelCase__ : List[Any] =['''pixel_values'''] self.assertListEqual(arg_names[:1], lowerCamelCase ) def snake_case ( self : Any )-> Union[str, Any]: lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase ) @unittest.skip(reason='''UperNet does not use inputs_embeds''' ) def snake_case ( self : Optional[Any] )-> List[Any]: pass @unittest.skip(reason='''UperNet does not support input and output 
embeddings''' ) def snake_case ( self : Any )-> List[str]: pass @unittest.skip(reason='''UperNet does not have a base model''' ) def snake_case ( self : int )-> Any: pass @unittest.skip(reason='''UperNet does not have a base model''' ) def snake_case ( self : Dict )-> str: pass @require_torch_multi_gpu @unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def snake_case ( self : List[Any] )-> List[str]: pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def snake_case ( self : Tuple )-> str: pass def snake_case ( self : Optional[int] )-> List[str]: def check_hidden_states_output(lowerCamelCase : Dict, lowerCamelCase : int, lowerCamelCase : List[str] ): lowerCamelCase__ : Union[str, Any] =model_class(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() with torch.no_grad(): lowerCamelCase__ : Optional[Any] =model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) ) lowerCamelCase__ : Optional[Any] =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCamelCase__ : List[str] =self.model_tester.num_stages self.assertEqual(len(lowerCamelCase ), expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], ) lowerCamelCase__ , lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Optional[int] =True check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase__ : Optional[Any] =True check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase ) def snake_case ( self : Any )-> List[Any]: lowerCamelCase__ , 
lowerCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ : str =_config_zero_init(lowerCamelCase ) lowerCamelCase__ : Union[str, Any] =_config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: lowerCamelCase__ : Optional[int] =model_class(config=lowerCamelCase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', ) @unittest.skip(reason='''UperNet does not have tied weights''' ) def snake_case ( self : Any )-> str: pass @slow def snake_case ( self : int )-> Union[str, Any]: for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : str =UperNetForSemanticSegmentation.from_pretrained(lowerCamelCase ) self.assertIsNotNone(lowerCamelCase ) def snake_case__ ( ): """simple docstring""" lowerCamelCase__ : Optional[int] =hf_hub_download( repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''' ) lowerCamelCase__ : List[str] =Image.open(__lowerCamelCase ).convert('''RGB''' ) return image @require_torch @require_vision @slow class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def snake_case ( self : str )-> Union[str, Any]: lowerCamelCase__ : List[Any] =AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' ) lowerCamelCase__ : List[Any] =UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(lowerCamelCase ) lowerCamelCase__ : List[Any] =prepare_img() lowerCamelCase__ : List[Any] =processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase ) with torch.no_grad(): lowerCamelCase__ : List[Any] =model(**lowerCamelCase ) lowerCamelCase__ : Optional[int] =torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape, 
lowerCamelCase ) lowerCamelCase__ : Dict =torch.tensor( [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], lowerCamelCase, atol=1E-4 ) ) def snake_case ( self : Optional[int] )-> Optional[Any]: lowerCamelCase__ : str =AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' ) lowerCamelCase__ : Tuple =UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(lowerCamelCase ) lowerCamelCase__ : Dict =prepare_img() lowerCamelCase__ : Any =processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase ) with torch.no_grad(): lowerCamelCase__ : Any =model(**lowerCamelCase ) lowerCamelCase__ : Dict =torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape, lowerCamelCase ) lowerCamelCase__ : List[str] =torch.tensor( [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], lowerCamelCase, atol=1E-4 ) )
625
1
"""simple docstring""" from datetime import datetime import requests from bsa import BeautifulSoup if __name__ == "__main__": _lowercase : Dict = input("Enter image url: ").strip() print(f'Downloading image from {url} ...') _lowercase : str = BeautifulSoup(requests.get(url).content, "html.parser") # The image URL is in the content field of the first meta tag with property og:image _lowercase : Optional[int] = soup.find("meta", {"property": "og:image"})["content"] _lowercase : List[Any] = requests.get(image_url).content _lowercase : str = f'{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg' with open(file_name, "wb") as fp: fp.write(image_data) print(f'Done. Image saved to disk as {file_name}.')
625
"""simple docstring""" from ..utils import DummyObject, requires_backends class __SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase_ ): '''simple docstring''' _a = ['onnx'] def __init__( self : List[str], *lowerCamelCase : Union[str, Any], **lowerCamelCase : str )-> Optional[int]: requires_backends(self, ['''onnx'''] ) @classmethod def snake_case ( cls : List[str], *lowerCamelCase : Any, **lowerCamelCase : Union[str, Any] )-> Optional[int]: requires_backends(cls, ['''onnx'''] ) @classmethod def snake_case ( cls : Union[str, Any], *lowerCamelCase : Tuple, **lowerCamelCase : Tuple )-> Optional[int]: requires_backends(cls, ['''onnx'''] )
625
1
"""simple docstring""" import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): _lowercase : Union[str, Any] = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right _lowercase : int = 1_2_8_0_2_2 _lowercase : List[str] = 1_2_8_0_2_8 @require_sentencepiece class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' _a = MaMaaaTokenizer _a = False _a = False _a = True def snake_case ( self : Optional[int] )-> List[Any]: super().setUp() lowerCamelCase__ : str =['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>'''] lowerCamelCase__ : List[str] =dict(zip(lowerCamelCase, range(len(lowerCamelCase ) ) ) ) lowerCamelCase__ : Tuple =Path(self.tmpdirname ) save_json(lowerCamelCase, save_dir / VOCAB_FILES_NAMES['''vocab_file'''] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(lowerCamelCase, save_dir / VOCAB_FILES_NAMES['''spm_file'''] ) lowerCamelCase__ : str =MaMaaaTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case ( self : List[Any], **lowerCamelCase : Union[str, Any] )-> Optional[int]: return MaMaaaTokenizer.from_pretrained(self.tmpdirname, **lowerCamelCase ) def snake_case ( self : List[str], lowerCamelCase : Optional[int] )-> Union[str, Any]: return ( "This is a test", "This is a test", ) def snake_case ( self : Tuple )-> 
List[str]: lowerCamelCase__ : Tuple ='''</s>''' lowerCamelCase__ : Dict =0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase ), lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase ), lowerCamelCase ) def snake_case ( self : Optional[Any] )-> Optional[Any]: lowerCamelCase__ : List[Any] =self.get_tokenizer() lowerCamelCase__ : str =list(tokenizer.get_vocab().keys() ) self.assertEqual(vocab_keys[0], '''</s>''' ) self.assertEqual(vocab_keys[1], '''<unk>''' ) self.assertEqual(vocab_keys[-1], '''<s>''' ) self.assertEqual(len(lowerCamelCase ), tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) ) @unittest.skip('''Skip this test while all models are still to be uploaded.''' ) def snake_case ( self : str )-> Any: pass def snake_case ( self : Tuple )-> Optional[Any]: lowerCamelCase__ : Dict =self.get_tokenizer() lowerCamelCase__ : Any =tokenizer.tokenize('''This is a test''' ) self.assertListEqual(lowerCamelCase, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCamelCase ), [2, 3, 4, 5, 6], ) lowerCamelCase__ : Any =tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] ) self.assertListEqual(lowerCamelCase, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) lowerCamelCase__ : Optional[int] =tokenizer.convert_tokens_to_string(lowerCamelCase ) self.assertEqual(lowerCamelCase, '''This is a test''' ) @slow def snake_case ( self : str )-> Tuple: # fmt: off lowerCamelCase__ : Any ={'''input_ids''': [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 
4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCamelCase, model_name='''facebook/m2m100_418M''', revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''', ) @require_torch @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' _a = 'facebook/m2m100_418M' _a = [ 'In my opinion, there are two levels of response from the French government.', 'NSA Affair Emphasizes Complete Lack of Debate on Intelligence', ] _a = [ 'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.', 'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement', ] # fmt: off _a = [EN_CODE, 5_9_3, 1_9_4_9, 1_1_5_7_8_1, 4, 7_1_5_8_6, 4_2_3_4, 6_0_6_3_3, 1_2_6_2_3_3, 4_3_2, 1_2_3_8_0_8, 1_5_5_9_2, 1_1_9_7, 1_1_7_1_3_2, 1_2_0_6_1_8, 5, 2] @classmethod def snake_case ( cls : Optional[Any] )-> Optional[int]: lowerCamelCase__ : MaMaaaTokenizer =MaMaaaTokenizer.from_pretrained( cls.checkpoint_name, src_lang='''en''', tgt_lang='''fr''' ) lowerCamelCase__ : str =1 return cls def snake_case ( self : int )-> Optional[int]: self.assertEqual(self.tokenizer.get_lang_id('''ar''' ), 12_8006 ) self.assertEqual(self.tokenizer.get_lang_id('''en''' ), 12_8022 ) self.assertEqual(self.tokenizer.get_lang_id('''ro''' ), 12_8076 ) self.assertEqual(self.tokenizer.get_lang_id('''mr''' ), 12_8063 ) def snake_case ( self : int )-> Optional[Any]: lowerCamelCase__ : Any =self.tokenizer.get_vocab() self.assertEqual(len(lowerCamelCase ), self.tokenizer.vocab_size ) self.assertEqual(vocab['''<unk>'''], 3 ) self.assertIn(self.tokenizer.get_lang_token('''en''' ), lowerCamelCase ) def snake_case ( self : Any )-> List[Any]: lowerCamelCase__ : Tuple ='''en''' lowerCamelCase__ : Union[str, Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens, lowerCamelCase ) def snake_case ( self : int )-> 
Tuple: self.assertIn(lowerCamelCase, self.tokenizer.all_special_ids ) # fmt: off lowerCamelCase__ : Tuple =[FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 1_4028, 136, 3286, 9706, 6, 9_0797, 6, 14_4012, 162, 8_8128, 3_0061, 5, 2] # fmt: on lowerCamelCase__ : Any =self.tokenizer.decode(lowerCamelCase, skip_special_tokens=lowerCamelCase ) lowerCamelCase__ : Optional[Any] =self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowerCamelCase ) self.assertEqual(lowerCamelCase, lowerCamelCase ) self.assertNotIn(self.tokenizer.eos_token, lowerCamelCase ) def snake_case ( self : Union[str, Any] )-> str: lowerCamelCase__ : Union[str, Any] =tempfile.mkdtemp() lowerCamelCase__ : Optional[Any] =self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(lowerCamelCase ) lowerCamelCase__ : List[str] =MaMaaaTokenizer.from_pretrained(lowerCamelCase ) self.assertDictEqual(new_tok.lang_token_to_id, lowerCamelCase ) @require_torch def snake_case ( self : List[Any] )-> Optional[int]: lowerCamelCase__ : Dict ='''en''' lowerCamelCase__ : Tuple ='''fr''' lowerCamelCase__ : Union[str, Any] =self.tokenizer(self.src_text, text_target=self.tgt_text, padding=lowerCamelCase, return_tensors='''pt''' ) lowerCamelCase__ : Dict =shift_tokens_right( batch['''labels'''], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id ) for k in batch: lowerCamelCase__ : List[str] =batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def snake_case ( self : Optional[int] )-> Optional[Any]: lowerCamelCase__ : List[str] ='''mr''' self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('''mr''' )] ) 
self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id] ) lowerCamelCase__ : Optional[int] ='''zh''' self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('''zh''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id] ) @require_torch def snake_case ( self : Dict )-> str: lowerCamelCase__ : str ='''mr''' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('''mr''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) lowerCamelCase__ : Dict ='''zh''' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('''zh''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) @require_torch def snake_case ( self : Any )-> str: lowerCamelCase__ : Any =self.tokenizer._build_translation_inputs('''A test''', return_tensors='''pt''', src_lang='''en''', tgt_lang='''ar''' ) self.assertEqual( nested_simplify(lowerCamelCase ), { # en_XX, A, test, EOS '''input_ids''': [[12_8022, 58, 4183, 2]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 12_8006, }, )
625
"""simple docstring""" import colorsys from PIL import Image # type: ignore def snake_case__ ( __lowerCamelCase : float , __lowerCamelCase : float , __lowerCamelCase : int ): """simple docstring""" lowerCamelCase__ : Optional[Any] =x lowerCamelCase__ : Any =y for step in range(__lowerCamelCase ): # noqa: B007 lowerCamelCase__ : List[Any] =a * a - b * b + x lowerCamelCase__ : Optional[int] =2 * a * b + y lowerCamelCase__ : Union[str, Any] =a_new # divergence happens for all complex number with an absolute value # greater than 4 if a * a + b * b > 4: break return step / (max_step - 1) def snake_case__ ( __lowerCamelCase : float ): """simple docstring""" if distance == 1: return (0, 0, 0) else: return (255, 255, 255) def snake_case__ ( __lowerCamelCase : float ): """simple docstring""" if distance == 1: return (0, 0, 0) else: return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(__lowerCamelCase , 1 , 1 ) ) def snake_case__ ( __lowerCamelCase : int = 800 , __lowerCamelCase : int = 600 , __lowerCamelCase : float = -0.6 , __lowerCamelCase : float = 0 , __lowerCamelCase : float = 3.2 , __lowerCamelCase : int = 50 , __lowerCamelCase : bool = True , ): """simple docstring""" lowerCamelCase__ : Optional[Any] =Image.new('''RGB''' , (image_width, image_height) ) lowerCamelCase__ : Optional[int] =img.load() # loop through the image-coordinates for image_x in range(__lowerCamelCase ): for image_y in range(__lowerCamelCase ): # determine the figure-coordinates based on the image-coordinates lowerCamelCase__ : Optional[Any] =figure_width / image_width * image_height lowerCamelCase__ : Dict =figure_center_x + (image_x / image_width - 0.5) * figure_width lowerCamelCase__ : Optional[int] =figure_center_y + (image_y / image_height - 0.5) * figure_height lowerCamelCase__ : Any =get_distance(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # color the corresponding pixel based on the selected coloring-function if use_distance_color_coding: lowerCamelCase__ : int 
=get_color_coded_rgb(__lowerCamelCase ) else: lowerCamelCase__ : Optional[int] =get_black_and_white_rgb(__lowerCamelCase ) return img if __name__ == "__main__": import doctest doctest.testmod() # colored version, full figure _lowercase : Optional[Any] = get_image() # uncomment for colored version, different section, zoomed in # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4, # figure_width = 0.8) # uncomment for black and white version, full figure # img = get_image(use_distance_color_coding = False) # uncomment to save the image # img.save("mandelbrot.png") img.show()
625
1
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' _a = StableUnCLIPImgaImgPipeline _a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS _a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS _a = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess _a = frozenset([] ) def snake_case ( self : List[str] )-> str: lowerCamelCase__ : Dict =32 lowerCamelCase__ : Optional[Any] =embedder_hidden_size # image encoding components lowerCamelCase__ : Dict =CLIPImageProcessor(crop_size=32, size=32 ) torch.manual_seed(0 ) lowerCamelCase__ : List[Any] =CLIPVisionModelWithProjection( CLIPVisionConfig( hidden_size=lowerCamelCase, projection_dim=lowerCamelCase, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ) ) # regular denoising components torch.manual_seed(0 ) lowerCamelCase__ : Optional[int] 
=StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase ) lowerCamelCase__ : Dict =DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' ) torch.manual_seed(0 ) lowerCamelCase__ : Optional[int] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) torch.manual_seed(0 ) lowerCamelCase__ : Tuple =CLIPTextModel( CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=lowerCamelCase, projection_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) ) torch.manual_seed(0 ) lowerCamelCase__ : Dict =UNetaDConditionModel( sample_size=32, in_channels=4, out_channels=4, down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D'''), up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D'''), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type='''projection''', projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=lowerCamelCase, layers_per_block=1, upcast_attention=lowerCamelCase, use_linear_projection=lowerCamelCase, ) torch.manual_seed(0 ) lowerCamelCase__ : Union[str, Any] =DDIMScheduler( beta_schedule='''scaled_linear''', beta_start=0.00_085, beta_end=0.012, prediction_type='''v_prediction''', set_alpha_to_one=lowerCamelCase, steps_offset=1, ) torch.manual_seed(0 ) lowerCamelCase__ : Optional[int] =AutoencoderKL() lowerCamelCase__ : int ={ # image encoding components '''feature_extractor''': feature_extractor, '''image_encoder''': image_encoder.eval(), # image noising components '''image_normalizer''': image_normalizer.eval(), '''image_noising_scheduler''': image_noising_scheduler, # regular denoising components '''tokenizer''': tokenizer, '''text_encoder''': text_encoder.eval(), '''unet''': unet.eval(), '''scheduler''': scheduler, '''vae''': vae.eval(), } return components def snake_case ( self : str, lowerCamelCase : Dict, lowerCamelCase : Any=0, lowerCamelCase : str=True )-> List[str]: if str(lowerCamelCase 
).startswith('''mps''' ): lowerCamelCase__ : List[Any] =torch.manual_seed(lowerCamelCase ) else: lowerCamelCase__ : Any =torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase ) lowerCamelCase__ : Dict =floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase ) if pil_image: lowerCamelCase__ : int =input_image * 0.5 + 0.5 lowerCamelCase__ : Dict =input_image.clamp(0, 1 ) lowerCamelCase__ : List[str] =input_image.cpu().permute(0, 2, 3, 1 ).float().numpy() lowerCamelCase__ : Dict =DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0] return { "prompt": "An anime racoon running a marathon", "image": input_image, "generator": generator, "num_inference_steps": 2, "output_type": "np", } @skip_mps def snake_case ( self : List[str] )-> Optional[Any]: lowerCamelCase__ : Dict ='''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCamelCase__ : str =self.get_dummy_components() lowerCamelCase__ : int =StableUnCLIPImgaImgPipeline(**lowerCamelCase ) lowerCamelCase__ : Any =sd_pipe.to(lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase ) lowerCamelCase__ : Dict =self.get_dummy_inputs(lowerCamelCase ) inputs.update({'''image_embeds''': None} ) lowerCamelCase__ : Any =sd_pipe(**lowerCamelCase ).images lowerCamelCase__ : List[Any] =image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCamelCase__ : Union[str, Any] =np.array([0.3_872, 0.7_224, 0.5_601, 0.4_741, 0.6_872, 0.5_814, 0.4_636, 0.3_867, 0.5_078] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def snake_case ( self : int )-> Tuple: lowerCamelCase__ : Tuple =torch_device in ['''cpu''', '''mps'''] self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase ) def snake_case ( self : int )-> Optional[Any]: lowerCamelCase__ : List[Any] =torch_device in ['''cpu''', '''mps'''] self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase ) @unittest.skipIf( torch_device != '''cuda''' or 
not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', ) def snake_case ( self : List[str] )-> List[str]: self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase ) @slow @require_torch_gpu class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def snake_case ( self : List[Any] )-> Dict: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case ( self : Optional[int] )-> int: lowerCamelCase__ : Tuple =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' ) lowerCamelCase__ : Optional[int] =load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''' ) lowerCamelCase__ : Optional[Any] =StableUnCLIPImgaImgPipeline.from_pretrained( '''fusing/stable-unclip-2-1-l-img2img''', torch_dtype=torch.floataa ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowerCamelCase__ : int =torch.Generator(device='''cpu''' ).manual_seed(0 ) lowerCamelCase__ : Any =pipe(lowerCamelCase, '''anime turle''', generator=lowerCamelCase, output_type='''np''' ) lowerCamelCase__ : List[Any] =output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase ) def snake_case ( self : Optional[int] )-> Tuple: lowerCamelCase__ : Any =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' ) lowerCamelCase__ : str =load_numpy( 
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''' ) lowerCamelCase__ : Optional[int] =StableUnCLIPImgaImgPipeline.from_pretrained( '''fusing/stable-unclip-2-1-h-img2img''', torch_dtype=torch.floataa ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowerCamelCase__ : str =torch.Generator(device='''cpu''' ).manual_seed(0 ) lowerCamelCase__ : Tuple =pipe(lowerCamelCase, '''anime turle''', generator=lowerCamelCase, output_type='''np''' ) lowerCamelCase__ : Tuple =output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase ) def snake_case ( self : Optional[int] )-> List[str]: lowerCamelCase__ : int =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' ) torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowerCamelCase__ : Any =StableUnCLIPImgaImgPipeline.from_pretrained( '''fusing/stable-unclip-2-1-h-img2img''', torch_dtype=torch.floataa ) lowerCamelCase__ : Optional[Any] =pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowerCamelCase__ : List[Any] =pipe( lowerCamelCase, '''anime turtle''', num_inference_steps=2, output_type='''np''', ) lowerCamelCase__ : Optional[int] =torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
625
"""simple docstring""" import argparse import json import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( VideoMAEConfig, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEImageProcessor, ) def snake_case__ ( __lowerCamelCase : Optional[Any] ): """simple docstring""" lowerCamelCase__ : str =VideoMAEConfig() set_architecture_configs(__lowerCamelCase , __lowerCamelCase ) if "finetuned" not in model_name: lowerCamelCase__ : int =False if "finetuned" in model_name: lowerCamelCase__ : str ='''huggingface/label-files''' if "kinetics" in model_name: lowerCamelCase__ : List[Any] =400 lowerCamelCase__ : Optional[int] ='''kinetics400-id2label.json''' elif "ssv2" in model_name: lowerCamelCase__ : Tuple =174 lowerCamelCase__ : Optional[Any] ='''something-something-v2-id2label.json''' else: raise ValueError('''Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.''' ) lowerCamelCase__ : Optional[int] =json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) ) lowerCamelCase__ : List[Any] ={int(__lowerCamelCase ): v for k, v in idalabel.items()} lowerCamelCase__ : Dict =idalabel lowerCamelCase__ : Any ={v: k for k, v in idalabel.items()} return config def snake_case__ ( __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any] ): """simple docstring""" if "small" in model_name: lowerCamelCase__ : Optional[Any] =384 lowerCamelCase__ : List[Any] =1536 lowerCamelCase__ : int =12 lowerCamelCase__ : Dict =16 lowerCamelCase__ : List[Any] =12 lowerCamelCase__ : Optional[Any] =3 lowerCamelCase__ : Union[str, Any] =192 lowerCamelCase__ : str =768 elif "large" in model_name: lowerCamelCase__ : Union[str, Any] =1024 lowerCamelCase__ : str =4096 lowerCamelCase__ : int =24 lowerCamelCase__ : Dict =16 lowerCamelCase__ : Union[str, Any] =12 lowerCamelCase__ : List[Any] =8 lowerCamelCase__ : int =512 lowerCamelCase__ : Optional[Any] 
=2048 elif "huge" in model_name: lowerCamelCase__ : Optional[int] =1280 lowerCamelCase__ : Optional[int] =5120 lowerCamelCase__ : List[Any] =32 lowerCamelCase__ : List[Any] =16 lowerCamelCase__ : Optional[Any] =12 lowerCamelCase__ : Dict =8 lowerCamelCase__ : List[Any] =640 lowerCamelCase__ : Any =2560 elif "base" not in model_name: raise ValueError('''Model name should include either "small", "base", "large", or "huge"''' ) def snake_case__ ( __lowerCamelCase : Any ): """simple docstring""" if "encoder." in name: lowerCamelCase__ : Optional[int] =name.replace('''encoder.''' , '''''' ) if "cls_token" in name: lowerCamelCase__ : List[Any] =name.replace('''cls_token''' , '''videomae.embeddings.cls_token''' ) if "decoder_pos_embed" in name: lowerCamelCase__ : Tuple =name.replace('''decoder_pos_embed''' , '''decoder.decoder_pos_embed''' ) if "pos_embed" in name and "decoder" not in name: lowerCamelCase__ : Any =name.replace('''pos_embed''' , '''videomae.embeddings.position_embeddings''' ) if "patch_embed.proj" in name: lowerCamelCase__ : Optional[Any] =name.replace('''patch_embed.proj''' , '''videomae.embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: lowerCamelCase__ : List[Any] =name.replace('''patch_embed.norm''' , '''videomae.embeddings.norm''' ) if "decoder.blocks" in name: lowerCamelCase__ : Tuple =name.replace('''decoder.blocks''' , '''decoder.decoder_layers''' ) if "blocks" in name: lowerCamelCase__ : Dict =name.replace('''blocks''' , '''videomae.encoder.layer''' ) if "attn.proj" in name: lowerCamelCase__ : Union[str, Any] =name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name and "bias" not in name: lowerCamelCase__ : List[str] =name.replace('''attn''' , '''attention.self''' ) if "attn" in name: lowerCamelCase__ : Union[str, Any] =name.replace('''attn''' , '''attention.attention''' ) if "norm1" in name: lowerCamelCase__ : Tuple =name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: 
lowerCamelCase__ : Optional[int] =name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: lowerCamelCase__ : List[Any] =name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: lowerCamelCase__ : int =name.replace('''mlp.fc2''' , '''output.dense''' ) if "decoder_embed" in name: lowerCamelCase__ : Any =name.replace('''decoder_embed''' , '''decoder.decoder_embed''' ) if "decoder_norm" in name: lowerCamelCase__ : Optional[Any] =name.replace('''decoder_norm''' , '''decoder.decoder_norm''' ) if "decoder_pred" in name: lowerCamelCase__ : Any =name.replace('''decoder_pred''' , '''decoder.decoder_pred''' ) if "norm.weight" in name and "decoder" not in name and "fc" not in name: lowerCamelCase__ : str =name.replace('''norm.weight''' , '''videomae.layernorm.weight''' ) if "norm.bias" in name and "decoder" not in name and "fc" not in name: lowerCamelCase__ : Optional[int] =name.replace('''norm.bias''' , '''videomae.layernorm.bias''' ) if "head" in name and "decoder" not in name: lowerCamelCase__ : List[str] =name.replace('''head''' , '''classifier''' ) return name def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : int ): """simple docstring""" for key in orig_state_dict.copy().keys(): lowerCamelCase__ : Dict =orig_state_dict.pop(__lowerCamelCase ) if key.startswith('''encoder.''' ): lowerCamelCase__ : Optional[int] =key.replace('''encoder.''' , '''''' ) if "qkv" in key: lowerCamelCase__ : Any =key.split('''.''' ) if key.startswith('''decoder.blocks''' ): lowerCamelCase__ : Tuple =config.decoder_hidden_size lowerCamelCase__ : str =int(key_split[2] ) lowerCamelCase__ : Any ='''decoder.decoder_layers.''' if "weight" in key: lowerCamelCase__ : List[Any] =val[:dim, :] lowerCamelCase__ : Any =val[dim : dim * 2, :] lowerCamelCase__ : Dict =val[-dim:, :] else: lowerCamelCase__ : Optional[Any] =config.hidden_size lowerCamelCase__ : Optional[Any] =int(key_split[1] ) lowerCamelCase__ : str ='''videomae.encoder.layer.''' if "weight" in 
key: lowerCamelCase__ : int =val[:dim, :] lowerCamelCase__ : Tuple =val[dim : dim * 2, :] lowerCamelCase__ : List[Any] =val[-dim:, :] else: lowerCamelCase__ : int =val return orig_state_dict def snake_case__ ( ): """simple docstring""" lowerCamelCase__ : List[Any] =hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' ) lowerCamelCase__ : Optional[Any] =np.load(__lowerCamelCase ) return list(__lowerCamelCase ) def snake_case__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] ): """simple docstring""" lowerCamelCase__ : str =get_videomae_config(__lowerCamelCase ) if "finetuned" in model_name: lowerCamelCase__ : Tuple =VideoMAEForVideoClassification(__lowerCamelCase ) else: lowerCamelCase__ : int =VideoMAEForPreTraining(__lowerCamelCase ) # download original checkpoint, hosted on Google Drive lowerCamelCase__ : Union[str, Any] ='''pytorch_model.bin''' gdown.cached_download(__lowerCamelCase , __lowerCamelCase , quiet=__lowerCamelCase ) lowerCamelCase__ : Optional[Any] =torch.load(__lowerCamelCase , map_location='''cpu''' ) if "model" in files: lowerCamelCase__ : Dict =files['''model'''] else: lowerCamelCase__ : str =files['''module'''] lowerCamelCase__ : Optional[Any] =convert_state_dict(__lowerCamelCase , __lowerCamelCase ) model.load_state_dict(__lowerCamelCase ) model.eval() # verify model on basic input lowerCamelCase__ : Dict =VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) lowerCamelCase__ : int =prepare_video() lowerCamelCase__ : Tuple =image_processor(__lowerCamelCase , return_tensors='''pt''' ) if "finetuned" not in model_name: lowerCamelCase__ : Tuple =hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' ) lowerCamelCase__ : Union[str, Any] =torch.load(__lowerCamelCase ) lowerCamelCase__ : int 
=model(**__lowerCamelCase ) lowerCamelCase__ : Dict =outputs.logits lowerCamelCase__ : List[str] =[ '''videomae-small-finetuned-kinetics''', '''videomae-small-finetuned-ssv2''', # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600) '''videomae-base-short''', '''videomae-base-short-finetuned-kinetics''', '''videomae-base''', '''videomae-base-finetuned-kinetics''', '''videomae-large''', '''videomae-large-finetuned-kinetics''', '''videomae-huge-finetuned-kinetics''', # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400) '''videomae-base-short-ssv2''', '''videomae-base-short-finetuned-ssv2''', '''videomae-base-ssv2''', '''videomae-base-finetuned-ssv2''', ] # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5] if model_name == "videomae-small-finetuned-kinetics": lowerCamelCase__ : Union[str, Any] =torch.Size([1, 400] ) lowerCamelCase__ : str =torch.tensor([-0.92_91, -0.40_61, -0.93_07] ) elif model_name == "videomae-small-finetuned-ssv2": lowerCamelCase__ : int =torch.Size([1, 174] ) lowerCamelCase__ : Dict =torch.tensor([0.26_71, -0.46_89, -0.82_35] ) elif model_name == "videomae-base": lowerCamelCase__ : List[str] =torch.Size([1, 1408, 1536] ) lowerCamelCase__ : Dict =torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]] ) elif model_name == "videomae-base-short": lowerCamelCase__ : List[Any] =torch.Size([1, 1408, 1536] ) lowerCamelCase__ : List[str] =torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] ) # we verified the loss both for normalized and unnormalized targets for this one lowerCamelCase__ : str =torch.tensor([0.51_42] ) if config.norm_pix_loss else torch.tensor([0.64_69] ) elif model_name == "videomae-large": lowerCamelCase__ : Union[str, Any] =torch.Size([1, 1408, 1536] ) lowerCamelCase__ : List[Any] =torch.tensor([[0.71_49, 0.79_97, 0.69_66], 
[0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]] ) elif model_name == "videomae-large-finetuned-kinetics": lowerCamelCase__ : Any =torch.Size([1, 400] ) lowerCamelCase__ : str =torch.tensor([0.07_71, 0.00_11, -0.36_25] ) elif model_name == "videomae-huge-finetuned-kinetics": lowerCamelCase__ : Any =torch.Size([1, 400] ) lowerCamelCase__ : Optional[int] =torch.tensor([0.24_33, 0.16_32, -0.48_94] ) elif model_name == "videomae-base-short-finetuned-kinetics": lowerCamelCase__ : List[str] =torch.Size([1, 400] ) lowerCamelCase__ : Dict =torch.tensor([0.65_88, 0.09_90, -0.24_93] ) elif model_name == "videomae-base-finetuned-kinetics": lowerCamelCase__ : str =torch.Size([1, 400] ) lowerCamelCase__ : Any =torch.tensor([0.36_69, -0.06_88, -0.24_21] ) elif model_name == "videomae-base-short-ssv2": lowerCamelCase__ : Tuple =torch.Size([1, 1408, 1536] ) lowerCamelCase__ : Dict =torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]] ) elif model_name == "videomae-base-short-finetuned-ssv2": lowerCamelCase__ : Optional[int] =torch.Size([1, 174] ) lowerCamelCase__ : Any =torch.tensor([-0.05_37, -0.15_39, -0.32_66] ) elif model_name == "videomae-base-ssv2": lowerCamelCase__ : Dict =torch.Size([1, 1408, 1536] ) lowerCamelCase__ : str =torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]] ) elif model_name == "videomae-base-finetuned-ssv2": lowerCamelCase__ : str =torch.Size([1, 174] ) lowerCamelCase__ : int =torch.tensor([0.19_61, -0.83_37, -0.63_89] ) else: raise ValueError(f'''Model name not supported. 
Should be one of {model_names}''' ) # verify logits assert logits.shape == expected_shape if "finetuned" in model_name: assert torch.allclose(logits[0, :3] , __lowerCamelCase , atol=1e-4 ) else: print('''Logits:''' , logits[0, :3, :3] ) assert torch.allclose(logits[0, :3, :3] , __lowerCamelCase , atol=1e-4 ) print('''Logits ok!''' ) # verify loss, if applicable if model_name == "videomae-base-short": lowerCamelCase__ : str =outputs.loss assert torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-4 ) print('''Loss ok!''' ) if pytorch_dump_folder_path is not None: print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__lowerCamelCase ) model.save_pretrained(__lowerCamelCase ) if push_to_hub: print('''Pushing to the hub...''' ) model.push_to_hub(__lowerCamelCase , organization='''nielsr''' ) if __name__ == "__main__": _lowercase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&amp;export=download&amp;confirm=t&amp;uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4", type=str, help=( "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct" " download link." ), ) parser.add_argument( "--pytorch_dump_folder_path", default="/Users/nielsrogge/Documents/VideoMAE/Test", type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) _lowercase : Union[str, Any] = parser.parse_args() convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
625
1
"""simple docstring""" import os from math import logaa def snake_case__ ( __lowerCamelCase : str = "base_exp.txt" ): """simple docstring""" lowerCamelCase__ : float =0 lowerCamelCase__ : Any =0 for i, line in enumerate(open(os.path.join(os.path.dirname(__lowerCamelCase ) , __lowerCamelCase ) ) ): lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =list(map(__lowerCamelCase , line.split(''',''' ) ) ) if x * logaa(__lowerCamelCase ) > largest: lowerCamelCase__ : Union[str, Any] =x * logaa(__lowerCamelCase ) lowerCamelCase__ : List[str] =i + 1 return result if __name__ == "__main__": print(solution())
625
"""simple docstring""" _lowercase : str = 0 # The first color of the flag. _lowercase : Dict = 1 # The second color of the flag. _lowercase : Tuple = 2 # The third color of the flag. _lowercase : Optional[int] = (red, white, blue) def snake_case__ ( __lowerCamelCase : list ): """simple docstring""" if not sequence: return [] if len(__lowerCamelCase ) == 1: return list(__lowerCamelCase ) lowerCamelCase__ : List[Any] =0 lowerCamelCase__ : Dict =len(__lowerCamelCase ) - 1 lowerCamelCase__ : Tuple =0 while mid <= high: if sequence[mid] == colors[0]: lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =sequence[mid], sequence[low] low += 1 mid += 1 elif sequence[mid] == colors[1]: mid += 1 elif sequence[mid] == colors[2]: lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =sequence[high], sequence[mid] high -= 1 else: lowerCamelCase__ : Dict =f'''The elements inside the sequence must contains only {colors} values''' raise ValueError(__lowerCamelCase ) return sequence if __name__ == "__main__": import doctest doctest.testmod() _lowercase : Optional[Any] = input("Enter numbers separated by commas:\n").strip() _lowercase : int = [int(item.strip()) for item in user_input.split(",")] print(f'{dutch_national_flag_sort(unsorted)}')
625
1
"""simple docstring""" def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : bool = False ): """simple docstring""" if not isinstance(__lowerCamelCase , __lowerCamelCase ): lowerCamelCase__ : Union[str, Any] =f'''Expected string as input, found {type(__lowerCamelCase )}''' raise ValueError(__lowerCamelCase ) if not isinstance(__lowerCamelCase , __lowerCamelCase ): lowerCamelCase__ : Tuple =f'''Expected boolean as use_pascal parameter, found {type(__lowerCamelCase )}''' raise ValueError(__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] =input_str.split('''_''' ) lowerCamelCase__ : Union[str, Any] =0 if use_pascal else 1 lowerCamelCase__ : Tuple =words[start_index:] lowerCamelCase__ : Optional[Any] =[word[0].upper() + word[1:] for word in words_to_capitalize] lowerCamelCase__ : Dict ='''''' if use_pascal else words[0] return "".join([initial_word, *capitalized_words] ) if __name__ == "__main__": from doctest import testmod testmod()
625
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' _a = StableUnCLIPImgaImgPipeline _a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS _a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS _a = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess _a = frozenset([] ) def snake_case ( self : List[str] )-> str: lowerCamelCase__ : Dict =32 lowerCamelCase__ : Optional[Any] =embedder_hidden_size # image encoding components lowerCamelCase__ : Dict =CLIPImageProcessor(crop_size=32, size=32 ) torch.manual_seed(0 ) lowerCamelCase__ : List[Any] =CLIPVisionModelWithProjection( CLIPVisionConfig( hidden_size=lowerCamelCase, projection_dim=lowerCamelCase, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ) ) # regular denoising components torch.manual_seed(0 ) lowerCamelCase__ : Optional[int] 
=StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase ) lowerCamelCase__ : Dict =DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' ) torch.manual_seed(0 ) lowerCamelCase__ : Optional[int] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) torch.manual_seed(0 ) lowerCamelCase__ : Tuple =CLIPTextModel( CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=lowerCamelCase, projection_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) ) torch.manual_seed(0 ) lowerCamelCase__ : Dict =UNetaDConditionModel( sample_size=32, in_channels=4, out_channels=4, down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D'''), up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D'''), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type='''projection''', projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=lowerCamelCase, layers_per_block=1, upcast_attention=lowerCamelCase, use_linear_projection=lowerCamelCase, ) torch.manual_seed(0 ) lowerCamelCase__ : Union[str, Any] =DDIMScheduler( beta_schedule='''scaled_linear''', beta_start=0.00_085, beta_end=0.012, prediction_type='''v_prediction''', set_alpha_to_one=lowerCamelCase, steps_offset=1, ) torch.manual_seed(0 ) lowerCamelCase__ : Optional[int] =AutoencoderKL() lowerCamelCase__ : int ={ # image encoding components '''feature_extractor''': feature_extractor, '''image_encoder''': image_encoder.eval(), # image noising components '''image_normalizer''': image_normalizer.eval(), '''image_noising_scheduler''': image_noising_scheduler, # regular denoising components '''tokenizer''': tokenizer, '''text_encoder''': text_encoder.eval(), '''unet''': unet.eval(), '''scheduler''': scheduler, '''vae''': vae.eval(), } return components def snake_case ( self : str, lowerCamelCase : Dict, lowerCamelCase : Any=0, lowerCamelCase : str=True )-> List[str]: if str(lowerCamelCase 
).startswith('''mps''' ): lowerCamelCase__ : List[Any] =torch.manual_seed(lowerCamelCase ) else: lowerCamelCase__ : Any =torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase ) lowerCamelCase__ : Dict =floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase ) if pil_image: lowerCamelCase__ : int =input_image * 0.5 + 0.5 lowerCamelCase__ : Dict =input_image.clamp(0, 1 ) lowerCamelCase__ : List[str] =input_image.cpu().permute(0, 2, 3, 1 ).float().numpy() lowerCamelCase__ : Dict =DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0] return { "prompt": "An anime racoon running a marathon", "image": input_image, "generator": generator, "num_inference_steps": 2, "output_type": "np", } @skip_mps def snake_case ( self : List[str] )-> Optional[Any]: lowerCamelCase__ : Dict ='''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCamelCase__ : str =self.get_dummy_components() lowerCamelCase__ : int =StableUnCLIPImgaImgPipeline(**lowerCamelCase ) lowerCamelCase__ : Any =sd_pipe.to(lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase ) lowerCamelCase__ : Dict =self.get_dummy_inputs(lowerCamelCase ) inputs.update({'''image_embeds''': None} ) lowerCamelCase__ : Any =sd_pipe(**lowerCamelCase ).images lowerCamelCase__ : List[Any] =image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCamelCase__ : Union[str, Any] =np.array([0.3_872, 0.7_224, 0.5_601, 0.4_741, 0.6_872, 0.5_814, 0.4_636, 0.3_867, 0.5_078] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def snake_case ( self : int )-> Tuple: lowerCamelCase__ : Tuple =torch_device in ['''cpu''', '''mps'''] self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase ) def snake_case ( self : int )-> Optional[Any]: lowerCamelCase__ : List[Any] =torch_device in ['''cpu''', '''mps'''] self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase ) @unittest.skipIf( torch_device != '''cuda''' or 
not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', ) def snake_case ( self : List[str] )-> List[str]: self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase ) @slow @require_torch_gpu class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def snake_case ( self : List[Any] )-> Dict: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case ( self : Optional[int] )-> int: lowerCamelCase__ : Tuple =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' ) lowerCamelCase__ : Optional[int] =load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''' ) lowerCamelCase__ : Optional[Any] =StableUnCLIPImgaImgPipeline.from_pretrained( '''fusing/stable-unclip-2-1-l-img2img''', torch_dtype=torch.floataa ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowerCamelCase__ : int =torch.Generator(device='''cpu''' ).manual_seed(0 ) lowerCamelCase__ : Any =pipe(lowerCamelCase, '''anime turle''', generator=lowerCamelCase, output_type='''np''' ) lowerCamelCase__ : List[Any] =output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase ) def snake_case ( self : Optional[int] )-> Tuple: lowerCamelCase__ : Any =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' ) lowerCamelCase__ : str =load_numpy( 
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''' ) lowerCamelCase__ : Optional[int] =StableUnCLIPImgaImgPipeline.from_pretrained( '''fusing/stable-unclip-2-1-h-img2img''', torch_dtype=torch.floataa ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowerCamelCase__ : str =torch.Generator(device='''cpu''' ).manual_seed(0 ) lowerCamelCase__ : Tuple =pipe(lowerCamelCase, '''anime turle''', generator=lowerCamelCase, output_type='''np''' ) lowerCamelCase__ : Tuple =output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase ) def snake_case ( self : Optional[int] )-> List[str]: lowerCamelCase__ : int =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' ) torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowerCamelCase__ : Any =StableUnCLIPImgaImgPipeline.from_pretrained( '''fusing/stable-unclip-2-1-h-img2img''', torch_dtype=torch.floataa ) lowerCamelCase__ : Optional[Any] =pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowerCamelCase__ : List[Any] =pipe( lowerCamelCase, '''anime turtle''', num_inference_steps=2, output_type='''np''', ) lowerCamelCase__ : Optional[int] =torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
625
1
"""simple docstring""" import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Optional[int], lowerCamelCase : str, lowerCamelCase : Dict=14, lowerCamelCase : List[Any]=7, lowerCamelCase : Dict=True, lowerCamelCase : Tuple=True, lowerCamelCase : int=False, lowerCamelCase : str=True, lowerCamelCase : Union[str, Any]=99, lowerCamelCase : Optional[Any]=32, lowerCamelCase : str=4, lowerCamelCase : Tuple=4, lowerCamelCase : Any=4, lowerCamelCase : Union[str, Any]=37, lowerCamelCase : Dict="gelu", lowerCamelCase : Optional[Any]=0.1, lowerCamelCase : Optional[Any]=0.1, lowerCamelCase : Tuple=512, lowerCamelCase : int=0.02, )-> List[Any]: lowerCamelCase__ : Union[str, Any] =parent lowerCamelCase__ : List[Any] =batch_size lowerCamelCase__ : int =seq_length lowerCamelCase__ : List[str] =is_training lowerCamelCase__ : Tuple =use_input_mask lowerCamelCase__ : List[Any] =use_token_type_ids lowerCamelCase__ : Any =use_labels lowerCamelCase__ : Tuple =vocab_size lowerCamelCase__ : Optional[int] =hidden_size lowerCamelCase__ : Dict =rotary_dim lowerCamelCase__ : str =num_hidden_layers lowerCamelCase__ : Union[str, Any] =num_attention_heads lowerCamelCase__ : List[str] =intermediate_size lowerCamelCase__ : Optional[Any] =hidden_act lowerCamelCase__ : 
Optional[int] =hidden_dropout_prob lowerCamelCase__ : Optional[Any] =attention_probs_dropout_prob lowerCamelCase__ : Tuple =max_position_embeddings lowerCamelCase__ : Any =initializer_range lowerCamelCase__ : List[Any] =None lowerCamelCase__ : Optional[Any] =vocab_size - 1 lowerCamelCase__ : Dict =vocab_size - 1 lowerCamelCase__ : Dict =vocab_size - 1 def snake_case ( self : List[str] )-> int: lowerCamelCase__ : List[str] =ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) lowerCamelCase__ : int =None if self.use_input_mask: lowerCamelCase__ : List[str] =random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase__ : Tuple =GPTJConfig( vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, use_cache=lowerCamelCase, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, rotary_dim=self.rotary_dim, ) return (config, input_ids, input_mask) def snake_case ( self : Optional[Any] )-> Union[str, Any]: lowerCamelCase__ : Dict =self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any =config_and_inputs lowerCamelCase__ : str ={'''input_ids''': input_ids, '''attention_mask''': attention_mask} return config, inputs_dict def snake_case ( self : Union[str, Any], lowerCamelCase : Union[str, Any], lowerCamelCase : List[str], lowerCamelCase : Optional[Any], lowerCamelCase : Tuple )-> List[Any]: lowerCamelCase__ : Tuple =20 lowerCamelCase__ : Union[str, Any] =model_class_name(lowerCamelCase ) lowerCamelCase__ : Any =model.init_cache(input_ids.shape[0], lowerCamelCase ) lowerCamelCase__ : Optional[int] =jnp.ones((input_ids.shape[0], max_decoder_length), dtype='''i4''' ) lowerCamelCase__ : List[Any] =jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) ) lowerCamelCase__ : Union[str, Any] =model( input_ids[:, :-1], 
attention_mask=lowerCamelCase, past_key_values=lowerCamelCase, position_ids=lowerCamelCase, ) lowerCamelCase__ : Optional[Any] =jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype='''i4''' ) lowerCamelCase__ : str =model( input_ids[:, -1:], attention_mask=lowerCamelCase, past_key_values=outputs_cache.past_key_values, position_ids=lowerCamelCase, ) lowerCamelCase__ : Tuple =model(lowerCamelCase ) lowerCamelCase__ : Union[str, Any] =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3, msg=F'''Max diff is {diff}''' ) def snake_case ( self : Union[str, Any], lowerCamelCase : Union[str, Any], lowerCamelCase : Tuple, lowerCamelCase : List[Any], lowerCamelCase : Optional[int] )-> str: lowerCamelCase__ : Any =20 lowerCamelCase__ : int =model_class_name(lowerCamelCase ) lowerCamelCase__ : Optional[Any] =jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )], axis=-1, ) lowerCamelCase__ : List[str] =model.init_cache(input_ids.shape[0], lowerCamelCase ) lowerCamelCase__ : Optional[int] =jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) ) lowerCamelCase__ : str =model( input_ids[:, :-1], attention_mask=lowerCamelCase, past_key_values=lowerCamelCase, position_ids=lowerCamelCase, ) lowerCamelCase__ : Optional[int] =jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype='''i4''' ) lowerCamelCase__ : str =model( input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=lowerCamelCase, position_ids=lowerCamelCase, ) lowerCamelCase__ : Dict =model(lowerCamelCase, attention_mask=lowerCamelCase ) lowerCamelCase__ : Optional[Any] =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3, msg=F'''Max diff is {diff}''' ) @require_flax class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , 
unittest.TestCase ): '''simple docstring''' _a = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () _a = (FlaxGPTJForCausalLM,) if is_flax_available() else () def snake_case ( self : Dict )-> Optional[Any]: lowerCamelCase__ : Optional[Any] =FlaxGPTJModelTester(self ) def snake_case ( self : Optional[Any] )-> List[str]: for model_class_name in self.all_model_classes: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ) def snake_case ( self : List[Any] )-> List[str]: for model_class_name in self.all_model_classes: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple =self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ) @tooslow def snake_case ( self : Any )-> List[Any]: lowerCamelCase__ : Tuple =GPTaTokenizer.from_pretrained('''gpt2''', pad_token='''<|endoftext|>''', padding_side='''left''' ) lowerCamelCase__ : Any =tokenizer(['''Hello this is a long string''', '''Hey'''], return_tensors='''np''', padding=lowerCamelCase, truncation=lowerCamelCase ) lowerCamelCase__ : Union[str, Any] =FlaxGPTJForCausalLM.from_pretrained('''EleutherAI/gpt-j-6B''' ) lowerCamelCase__ : str =False lowerCamelCase__ : Union[str, Any] =model.config.eos_token_id lowerCamelCase__ : Optional[int] =jax.jit(model.generate ) lowerCamelCase__ : List[Any] =jit_generate( inputs['''input_ids'''], attention_mask=inputs['''attention_mask'''], pad_token_id=tokenizer.pad_token_id ).sequences lowerCamelCase__ : Optional[Any] =tokenizer.batch_decode(lowerCamelCase, skip_special_tokens=lowerCamelCase ) lowerCamelCase__ : Dict =[ '''Hello this is a long string of text.\n\nI\'m trying to get the text of the''', '''Hey, I\'m a little late to the party. 
I\'m going to''', ] self.assertListEqual(lowerCamelCase, lowerCamelCase ) @is_pt_flax_cross_test def snake_case ( self : Tuple )-> Any: lowerCamelCase__ , lowerCamelCase__ : Any =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs lowerCamelCase__ : Tuple =self._prepare_for_class(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : Optional[Any] ={k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class lowerCamelCase__ : List[Any] =model_class.__name__[4:] # Skip the "Flax" at the beginning lowerCamelCase__ : Union[str, Any] =getattr(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =pt_inputs['''input_ids'''].shape lowerCamelCase__ : str =np.random.randint(0, seq_length - 1, size=(batch_size,) ) for batch_idx, start_index in enumerate(lowerCamelCase ): lowerCamelCase__ : Dict =0 lowerCamelCase__ : Optional[int] =1 lowerCamelCase__ : Optional[int] =0 lowerCamelCase__ : List[str] =1 lowerCamelCase__ : str =pt_model_class(lowerCamelCase ).eval() lowerCamelCase__ : Optional[int] =model_class(lowerCamelCase, dtype=jnp.floataa ) lowerCamelCase__ : Dict =convert_pytorch_state_dict_to_flax(pt_model.state_dict(), lowerCamelCase ) lowerCamelCase__ : Union[str, Any] =fx_state with torch.no_grad(): lowerCamelCase__ : List[Any] =pt_model(**lowerCamelCase ).to_tuple() lowerCamelCase__ : str =fx_model(**lowerCamelCase ).to_tuple() self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(lowerCamelCase, lowerCamelCase ): self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowerCamelCase ) lowerCamelCase__ : str =model_class.from_pretrained(lowerCamelCase, from_pt=lowerCamelCase ) lowerCamelCase__ : str 
=fx_model_loaded(**lowerCamelCase ).to_tuple() self.assertEqual( len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' ) for fx_output_loaded, pt_output in zip(lowerCamelCase, lowerCamelCase ): self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4E-2 ) @is_pt_flax_cross_test def snake_case ( self : Any )-> List[str]: lowerCamelCase__ , lowerCamelCase__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs lowerCamelCase__ : int =self._prepare_for_class(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : int ={k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class lowerCamelCase__ : Tuple =model_class.__name__[4:] # Skip the "Flax" at the beginning lowerCamelCase__ : str =getattr(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : List[Any] =pt_model_class(lowerCamelCase ).eval() lowerCamelCase__ : Any =model_class(lowerCamelCase, dtype=jnp.floataa ) lowerCamelCase__ : Tuple =load_flax_weights_in_pytorch_model(lowerCamelCase, fx_model.params ) lowerCamelCase__ , lowerCamelCase__ : Any =pt_inputs['''input_ids'''].shape lowerCamelCase__ : List[Any] =np.random.randint(0, seq_length - 1, size=(batch_size,) ) for batch_idx, start_index in enumerate(lowerCamelCase ): lowerCamelCase__ : List[Any] =0 lowerCamelCase__ : Optional[int] =1 lowerCamelCase__ : Any =0 lowerCamelCase__ : str =1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): lowerCamelCase__ : Optional[int] =pt_model(**lowerCamelCase ).to_tuple() lowerCamelCase__ : str =fx_model(**lowerCamelCase ).to_tuple() self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(lowerCamelCase, lowerCamelCase ): self.assert_almost_equals(fx_output[:, -1], pt_output[:, 
-1].numpy(), 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowerCamelCase ) lowerCamelCase__ : int =pt_model_class.from_pretrained(lowerCamelCase, from_flax=lowerCamelCase ) with torch.no_grad(): lowerCamelCase__ : Optional[Any] =pt_model_loaded(**lowerCamelCase ).to_tuple() self.assertEqual( len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(lowerCamelCase, lowerCamelCase ): self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4E-2 ) @tooslow def snake_case ( self : List[Any] )-> Any: for model_class_name in self.all_model_classes: lowerCamelCase__ : Optional[int] =model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''' ) lowerCamelCase__ : List[str] =model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCamelCase )
625
"""simple docstring""" def snake_case__ ( __lowerCamelCase : int = 4000000 ): """simple docstring""" lowerCamelCase__ : Dict =[] lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =0, 1 while b <= n: if b % 2 == 0: even_fibs.append(__lowerCamelCase ) lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =b, a + b return sum(__lowerCamelCase ) if __name__ == "__main__": print(f'{solution() = }')
625
1
"""simple docstring""" import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel _lowercase : List[str] = { "text_branch": "text_model", "audio_branch": "audio_model.audio_encoder", "attn": "attention.self", "self.proj": "output.dense", "attention.self_mask": "attn_mask", "mlp.fc1": "intermediate.dense", "mlp.fc2": "output.dense", "norm1": "layernorm_before", "norm2": "layernorm_after", "bn0": "batch_norm", } _lowercase : Dict = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc") def snake_case__ ( __lowerCamelCase : Any , __lowerCamelCase : Any=False ): """simple docstring""" lowerCamelCase__ , lowerCamelCase__ : Dict =create_model( '''HTSAT-tiny''' , '''roberta''' , __lowerCamelCase , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=__lowerCamelCase , fusion_type='''aff_2d''' if enable_fusion else None , ) return model, model_cfg def snake_case__ ( __lowerCamelCase : List[str] ): """simple docstring""" lowerCamelCase__ : Any ={} lowerCamelCase__ : Union[str, Any] =R'''.*sequential.(\d+).*''' lowerCamelCase__ : Dict =R'''.*_projection.(\d+).*''' for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: lowerCamelCase__ : Any =key.replace(__lowerCamelCase , __lowerCamelCase ) if re.match(__lowerCamelCase , __lowerCamelCase ): # replace sequential layers with list lowerCamelCase__ : Tuple =re.match(__lowerCamelCase , __lowerCamelCase ).group(1 ) lowerCamelCase__ : Optional[int] =key.replace(f'''sequential.{sequential_layer}.''' , f'''layers.{int(__lowerCamelCase )//3}.linear.''' ) elif re.match(__lowerCamelCase , __lowerCamelCase ): lowerCamelCase__ : Union[str, Any] =int(re.match(__lowerCamelCase , __lowerCamelCase ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... 
lowerCamelCase__ : Union[str, Any] =1 if projecton_layer == 0 else 2 lowerCamelCase__ : Optional[Any] =key.replace(f'''_projection.{projecton_layer}.''' , f'''_projection.linear{transformers_projection_layer}.''' ) if "audio" and "qkv" in key: # split qkv into query key and value lowerCamelCase__ : List[Any] =value lowerCamelCase__ : Optional[Any] =mixed_qkv.size(0 ) // 3 lowerCamelCase__ : str =mixed_qkv[:qkv_dim] lowerCamelCase__ : Optional[int] =mixed_qkv[qkv_dim : qkv_dim * 2] lowerCamelCase__ : Union[str, Any] =mixed_qkv[qkv_dim * 2 :] lowerCamelCase__ : str =query_layer lowerCamelCase__ : List[str] =key_layer lowerCamelCase__ : Dict =value_layer else: lowerCamelCase__ : List[str] =value return model_state_dict def snake_case__ ( __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Any=False ): """simple docstring""" lowerCamelCase__ , lowerCamelCase__ : Tuple =init_clap(__lowerCamelCase , enable_fusion=__lowerCamelCase ) clap_model.eval() lowerCamelCase__ : Any =clap_model.state_dict() lowerCamelCase__ : Tuple =rename_state_dict(__lowerCamelCase ) lowerCamelCase__ : List[str] =ClapConfig() lowerCamelCase__ : List[Any] =enable_fusion lowerCamelCase__ : List[str] =ClapModel(__lowerCamelCase ) # ignore the spectrogram embedding layer model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase ) model.save_pretrained(__lowerCamelCase ) transformers_config.save_pretrained(__lowerCamelCase ) if __name__ == "__main__": _lowercase : List[Any] = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not") _lowercase : 
Union[str, Any] = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
625
"""simple docstring""" from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class __SCREAMING_SNAKE_CASE : '''simple docstring''' _a = BlenderbotSmallConfig _a = {} _a = 'gelu' def __init__( self : Union[str, Any], lowerCamelCase : List[str], lowerCamelCase : Dict=13, lowerCamelCase : Optional[Any]=7, lowerCamelCase : Optional[int]=True, lowerCamelCase : int=False, lowerCamelCase : Union[str, Any]=99, lowerCamelCase : str=32, lowerCamelCase : List[Any]=2, lowerCamelCase : Optional[int]=4, lowerCamelCase : Union[str, Any]=37, lowerCamelCase : str=0.1, lowerCamelCase : Optional[int]=0.1, lowerCamelCase : Optional[Any]=20, lowerCamelCase : int=2, lowerCamelCase : Any=1, lowerCamelCase : Optional[Any]=0, )-> List[str]: lowerCamelCase__ : Any =parent lowerCamelCase__ : Dict =batch_size lowerCamelCase__ : Optional[int] =seq_length lowerCamelCase__ : Tuple =is_training lowerCamelCase__ : Dict =use_labels lowerCamelCase__ : List[Any] =vocab_size lowerCamelCase__ : str =hidden_size lowerCamelCase__ : str =num_hidden_layers lowerCamelCase__ : Union[str, Any] =num_attention_heads lowerCamelCase__ : Any =intermediate_size lowerCamelCase__ : Dict =hidden_dropout_prob lowerCamelCase__ : List[Any] =attention_probs_dropout_prob lowerCamelCase__ : str =max_position_embeddings lowerCamelCase__ : Optional[int] =eos_token_id lowerCamelCase__ : str =pad_token_id lowerCamelCase__ : Union[str, Any] =bos_token_id def snake_case ( self 
: Any )-> Any: lowerCamelCase__ : Any =ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size ) lowerCamelCase__ : Tuple =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 ) lowerCamelCase__ : Any =tf.concat([input_ids, eos_tensor], axis=1 ) lowerCamelCase__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) lowerCamelCase__ : int =self.config_cls( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, ) lowerCamelCase__ : Optional[int] =prepare_blenderbot_small_inputs_dict(lowerCamelCase, lowerCamelCase, lowerCamelCase ) return config, inputs_dict def snake_case ( self : Any, lowerCamelCase : str, lowerCamelCase : Any )-> Optional[Any]: lowerCamelCase__ : Union[str, Any] =TFBlenderbotSmallModel(config=lowerCamelCase ).get_decoder() lowerCamelCase__ : List[Any] =inputs_dict['''input_ids'''] lowerCamelCase__ : Optional[int] =input_ids[:1, :] lowerCamelCase__ : str =inputs_dict['''attention_mask'''][:1, :] lowerCamelCase__ : Union[str, Any] =inputs_dict['''head_mask'''] lowerCamelCase__ : Optional[Any] =1 # first forward pass lowerCamelCase__ : Dict =model(lowerCamelCase, attention_mask=lowerCamelCase, head_mask=lowerCamelCase, use_cache=lowerCamelCase ) lowerCamelCase__ , lowerCamelCase__ : List[str] =outputs.to_tuple() # create hypothetical next token and extent to next_input_ids lowerCamelCase__ : Union[str, Any] =ids_tensor((self.batch_size, 3), config.vocab_size ) 
lowerCamelCase__ : Tuple =tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta ) # append to next input_ids and lowerCamelCase__ : List[str] =tf.concat([input_ids, next_tokens], axis=-1 ) lowerCamelCase__ : str =tf.concat([attention_mask, next_attn_mask], axis=-1 ) lowerCamelCase__ : Optional[int] =model(lowerCamelCase, attention_mask=lowerCamelCase )[0] lowerCamelCase__ : Union[str, Any] =model(lowerCamelCase, attention_mask=lowerCamelCase, past_key_values=lowerCamelCase )[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] ) # select random slice lowerCamelCase__ : Tuple =int(ids_tensor((1,), output_from_past.shape[-1] ) ) lowerCamelCase__ : int =output_from_no_past[:, -3:, random_slice_idx] lowerCamelCase__ : List[str] =output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowerCamelCase, lowerCamelCase, rtol=1E-3 ) def snake_case__ ( __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Tuple=None , __lowerCamelCase : Dict=None , __lowerCamelCase : Optional[int]=None , ): """simple docstring""" if attention_mask is None: lowerCamelCase__ : List[str] =tf.cast(tf.math.not_equal(__lowerCamelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: lowerCamelCase__ : str =tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: lowerCamelCase__ : int =tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowerCamelCase__ : int =tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: lowerCamelCase__ : List[str] =tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, 
"decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' _a = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) _a = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () _a = ( { 'conversational': TFBlenderbotSmallForConditionalGeneration, 'feature-extraction': TFBlenderbotSmallModel, 'summarization': TFBlenderbotSmallForConditionalGeneration, 'text2text-generation': TFBlenderbotSmallForConditionalGeneration, 'translation': TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) _a = True _a = False _a = False def snake_case ( self : Any )-> str: lowerCamelCase__ : Tuple =TFBlenderbotSmallModelTester(self ) lowerCamelCase__ : Union[str, Any] =ConfigTester(self, config_class=lowerCamelCase ) def snake_case ( self : Any )-> Optional[int]: self.config_tester.run_common_tests() def snake_case ( self : int )-> str: lowerCamelCase__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowerCamelCase ) @require_tokenizers @require_tf class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' _a = [ 'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like ' ' i\'m going to throw up.\nand why is that?' 
] _a = 'facebook/blenderbot_small-90M' @cached_property def snake_case ( self : Any )-> List[Any]: # use "old" tokenizer here because of bug when downloading new tokenizer return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) @cached_property def snake_case ( self : int )-> List[Any]: lowerCamelCase__ : str =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def snake_case ( self : Tuple )-> int: lowerCamelCase__ : Dict =self.tokenizer(self.src_text, return_tensors='''tf''' ) lowerCamelCase__ : Any =self.model.generate( model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=lowerCamelCase, ) lowerCamelCase__ : Any =self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=lowerCamelCase )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
625
1
"""simple docstring""" import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : int, lowerCamelCase : Optional[int], lowerCamelCase : Optional[Any]=13, lowerCamelCase : Tuple=7, lowerCamelCase : Dict=True, lowerCamelCase : List[str]=True, lowerCamelCase : Union[str, Any]=False, lowerCamelCase : List[str]=True, lowerCamelCase : str=99, lowerCamelCase : List[str]=32, lowerCamelCase : Optional[Any]=5, lowerCamelCase : Dict=4, lowerCamelCase : Optional[Any]=37, lowerCamelCase : Any="gelu", lowerCamelCase : Optional[int]=0.1, lowerCamelCase : List[Any]=0.1, lowerCamelCase : str=512, lowerCamelCase : List[str]=16, lowerCamelCase : int=2, lowerCamelCase : Optional[Any]=0.02, lowerCamelCase : List[str]=3, lowerCamelCase : Optional[int]=4, lowerCamelCase : Any=None, )-> List[Any]: lowerCamelCase__ : Any =parent lowerCamelCase__ : Dict =batch_size lowerCamelCase__ : Union[str, Any] =seq_length lowerCamelCase__ : Dict =is_training lowerCamelCase__ : Union[str, Any] =use_input_mask lowerCamelCase__ : Optional[Any] =use_token_type_ids lowerCamelCase__ : Tuple =use_labels lowerCamelCase__ : Union[str, Any] =vocab_size lowerCamelCase__ : str =hidden_size lowerCamelCase__ : List[str] =num_hidden_layers lowerCamelCase__ : int =num_attention_heads lowerCamelCase__ : List[str] =intermediate_size lowerCamelCase__ : str =hidden_act lowerCamelCase__ : int =hidden_dropout_prob lowerCamelCase__ : 
Tuple =attention_probs_dropout_prob lowerCamelCase__ : str =max_position_embeddings lowerCamelCase__ : Tuple =type_vocab_size lowerCamelCase__ : Tuple =type_sequence_label_size lowerCamelCase__ : Dict =initializer_range lowerCamelCase__ : Any =num_labels lowerCamelCase__ : Tuple =num_choices lowerCamelCase__ : Optional[Any] =scope def snake_case ( self : List[str] )-> List[Any]: lowerCamelCase__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) lowerCamelCase__ : Union[str, Any] =None if self.use_input_mask: lowerCamelCase__ : List[str] =random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase__ : Any =None if self.use_token_type_ids: lowerCamelCase__ : int =ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) lowerCamelCase__ : Union[str, Any] =None lowerCamelCase__ : int =None lowerCamelCase__ : Optional[int] =None if self.use_labels: lowerCamelCase__ : int =ids_tensor([self.batch_size], self.type_sequence_label_size ) lowerCamelCase__ : str =ids_tensor([self.batch_size, self.seq_length], self.num_labels ) lowerCamelCase__ : List[str] =ids_tensor([self.batch_size], self.num_choices ) lowerCamelCase__ : Union[str, Any] =self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case ( self : str )-> int: return OpenLlamaConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase, initializer_range=self.initializer_range, use_stable_embedding=lowerCamelCase, ) def snake_case ( self : str, lowerCamelCase : int, lowerCamelCase : Tuple, 
lowerCamelCase : Dict, lowerCamelCase : Optional[Any], lowerCamelCase : str, lowerCamelCase : Tuple, lowerCamelCase : List[str] )-> Tuple: lowerCamelCase__ : List[str] =OpenLlamaModel(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() lowerCamelCase__ : List[Any] =model(lowerCamelCase, attention_mask=lowerCamelCase ) lowerCamelCase__ : Any =model(lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case ( self : Optional[int], lowerCamelCase : Optional[Any], lowerCamelCase : List[str], lowerCamelCase : List[str], lowerCamelCase : Tuple, lowerCamelCase : List[str], lowerCamelCase : Optional[int], lowerCamelCase : Dict, lowerCamelCase : List[str], lowerCamelCase : int, )-> Any: lowerCamelCase__ : Optional[Any] =True lowerCamelCase__ : Optional[int] =OpenLlamaModel(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() lowerCamelCase__ : str =model( lowerCamelCase, attention_mask=lowerCamelCase, encoder_hidden_states=lowerCamelCase, encoder_attention_mask=lowerCamelCase, ) lowerCamelCase__ : List[Any] =model( lowerCamelCase, attention_mask=lowerCamelCase, encoder_hidden_states=lowerCamelCase, ) lowerCamelCase__ : Optional[int] =model(lowerCamelCase, attention_mask=lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case ( self : Dict, lowerCamelCase : Any, lowerCamelCase : Any, lowerCamelCase : Dict, lowerCamelCase : Optional[Any], lowerCamelCase : str, lowerCamelCase : List[str], lowerCamelCase : Tuple, lowerCamelCase : List[Any], lowerCamelCase : List[str], )-> List[str]: lowerCamelCase__ : Tuple =OpenLlamaForCausalLM(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() lowerCamelCase__ : Tuple =model(lowerCamelCase, attention_mask=lowerCamelCase, labels=lowerCamelCase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def 
snake_case ( self : List[str], lowerCamelCase : Optional[Any], lowerCamelCase : Tuple, lowerCamelCase : List[str], lowerCamelCase : Union[str, Any], lowerCamelCase : str, lowerCamelCase : Any, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : int, )-> Tuple: lowerCamelCase__ : List[str] =True lowerCamelCase__ : Tuple =True lowerCamelCase__ : Any =OpenLlamaForCausalLM(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() # first forward pass lowerCamelCase__ : Optional[int] =model( lowerCamelCase, attention_mask=lowerCamelCase, encoder_hidden_states=lowerCamelCase, encoder_attention_mask=lowerCamelCase, use_cache=lowerCamelCase, ) lowerCamelCase__ : Any =outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowerCamelCase__ : List[Any] =ids_tensor((self.batch_size, 3), config.vocab_size ) lowerCamelCase__ : Any =ids_tensor((self.batch_size, 3), vocab_size=2 ) # append to next input_ids and lowerCamelCase__ : Dict =torch.cat([input_ids, next_tokens], dim=-1 ) lowerCamelCase__ : str =torch.cat([input_mask, next_mask], dim=-1 ) lowerCamelCase__ : Union[str, Any] =model( lowerCamelCase, attention_mask=lowerCamelCase, encoder_hidden_states=lowerCamelCase, encoder_attention_mask=lowerCamelCase, output_hidden_states=lowerCamelCase, )['''hidden_states'''][0] lowerCamelCase__ : Optional[Any] =model( lowerCamelCase, attention_mask=lowerCamelCase, encoder_hidden_states=lowerCamelCase, encoder_attention_mask=lowerCamelCase, past_key_values=lowerCamelCase, output_hidden_states=lowerCamelCase, )['''hidden_states'''][0] # select random slice lowerCamelCase__ : str =ids_tensor((1,), output_from_past.shape[-1] ).item() lowerCamelCase__ : List[Any] =output_from_no_past[:, -3:, random_slice_idx].detach() lowerCamelCase__ : Tuple =output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice 
        # Tail of a tensor-comparison helper whose `def` line lies above this chunk:
        # assert the two tensors agree to 1e-3 absolute tolerance.
        self.parent.assertTrue(torch.allclose(lowerCamelCase, lowerCamelCase, atol=1E-3 ) )

    def snake_case ( self : List[Any] )-> List[Any]:
        # Split the tester's prepared config/inputs into (config, inputs_dict) for the
        # common model tests.
        # NOTE(review): the unpack below reads `config_and_inputs` and the dict reads
        # `input_ids`/`input_mask` -- names the mechanically-mangled assignments above no
        # longer bind; compare against the un-mangled original before relying on this.
        lowerCamelCase__ : str =self.prepare_config_and_inputs()
        ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) : Any =config_and_inputs
        lowerCamelCase__ : Optional[Any] ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict


@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    """Model-level test suite for Open-Llama: bare model, causal-LM and sequence-classification heads."""

    # Model classes / generative classes / pipeline mapping, guarded so test
    # collection works even without torch installed.
    _a = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    _a = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    _a = (
        {
            'feature-extraction': OpenLlamaModel,
            'text-classification': OpenLlamaForSequenceClassification,
            'text-generation': OpenLlamaForCausalLM,
            'zero-shot': OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    _a = False
    _a = False

    def snake_case ( self : Any )-> Optional[Any]:
        # Build the shared model tester and the generic config tester used below.
        lowerCamelCase__ : Any =OpenLlamaModelTester(self )
        lowerCamelCase__ : int =ConfigTester(self, config_class=lowerCamelCase, hidden_size=37 )

    def snake_case ( self : Tuple )-> Dict:
        # Run the generic configuration round-trip checks.
        self.config_tester.run_common_tests()

    def snake_case ( self : str )-> Optional[Any]:
        # Smoke-test the bare model on the tester's standard inputs.
        lowerCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase )

    def snake_case ( self : Dict )-> int:
        # Re-run the model check for every supported position-embedding type.
        lowerCamelCase__ : Tuple =self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            lowerCamelCase__ : List[Any] =type
            self.model_tester.create_and_check_model(*lowerCamelCase )

    def snake_case ( self : List[str] )-> Optional[Any]:
        # Sequence classification with integer labels (default problem type).
        # Checks only the logits shape: (batch_size, num_labels).
        lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : Optional[Any] =3
        lowerCamelCase__ : int =input_dict['''input_ids''']
        lowerCamelCase__ : Dict =input_ids.ne(1 ).to(lowerCamelCase )
        lowerCamelCase__ : int =ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size )
        lowerCamelCase__ : int =OpenLlamaForSequenceClassification(lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        lowerCamelCase__ : Any =model(lowerCamelCase, attention_mask=lowerCamelCase, labels=lowerCamelCase )
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )

    def snake_case ( self : int )-> Union[str, Any]:
        # Same shape check with problem_type forced to single-label classification.
        lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : Union[str, Any] =3
        lowerCamelCase__ : Optional[Any] ='''single_label_classification'''
        lowerCamelCase__ : int =input_dict['''input_ids''']
        lowerCamelCase__ : int =input_ids.ne(1 ).to(lowerCamelCase )
        lowerCamelCase__ : Dict =ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size )
        lowerCamelCase__ : Optional[Any] =OpenLlamaForSequenceClassification(lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        lowerCamelCase__ : List[str] =model(lowerCamelCase, attention_mask=lowerCamelCase, labels=lowerCamelCase )
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )

    def snake_case ( self : List[Any] )-> Tuple:
        # Multi-label classification: float multi-hot labels of shape (batch, num_labels).
        lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : Optional[int] =3
        lowerCamelCase__ : Union[str, Any] ='''multi_label_classification'''
        lowerCamelCase__ : List[Any] =input_dict['''input_ids''']
        lowerCamelCase__ : List[str] =input_ids.ne(1 ).to(lowerCamelCase )
        lowerCamelCase__ : int =ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float )
        lowerCamelCase__ : str =OpenLlamaForSequenceClassification(lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        lowerCamelCase__ : int =model(lowerCamelCase, attention_mask=lowerCamelCase, labels=lowerCamelCase )
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )

    @unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' )
    def snake_case ( self : List[str] )-> Union[str, Any]:
        # Intentionally skipped; see the reason in the decorator above.
        pass

    @parameterized.expand([('''linear''',), ('''dynamic''',)] )
    def snake_case ( self : str, lowerCamelCase : Tuple )-> Dict:
        # RoPE scaling sanity check: with identical seeds, the scaled model must match
        # the unscaled one on short inputs only under "dynamic" scaling, and must always
        # differ on inputs longer than max_position_embeddings.
        lowerCamelCase__ , lowerCamelCase__ : int =self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : List[str] =ids_tensor([1, 10], config.vocab_size )
        lowerCamelCase__ : List[str] =ids_tensor([1, int(config.max_position_embeddings * 1.5 )], config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        lowerCamelCase__ : str =OpenLlamaModel(lowerCamelCase )
        original_model.to(lowerCamelCase )
        original_model.eval()
        lowerCamelCase__ : Tuple =original_model(lowerCamelCase ).last_hidden_state
        lowerCamelCase__ : List[str] =original_model(lowerCamelCase ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        lowerCamelCase__ : List[Any] ={'''type''': scaling_type, '''factor''': 10.0}
        lowerCamelCase__ : Tuple =OpenLlamaModel(lowerCamelCase )
        scaled_model.to(lowerCamelCase )
        scaled_model.eval()
        lowerCamelCase__ : Any =scaled_model(lowerCamelCase ).last_hidden_state
        lowerCamelCase__ : Optional[Any] =scaled_model(lowerCamelCase ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input
        # longer than the original maximum sequence length, so the outputs for the short
        # input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(lowerCamelCase, lowerCamelCase, atol=1E-5 ) )
        else:
            self.assertFalse(torch.allclose(lowerCamelCase, lowerCamelCase, atol=1E-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(lowerCamelCase, lowerCamelCase, atol=1E-5 ) )
625
"""simple docstring""" def snake_case__ ( __lowerCamelCase : list[list[int]] , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : list[int] ): """simple docstring""" # 1. Validate that path exists between current and next vertices if graph[path[curr_ind - 1]][next_ver] == 0: return False # 2. Validate that next vertex is not already in path return not any(vertex == next_ver for vertex in path ) def snake_case__ ( __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[int] , __lowerCamelCase : int ): """simple docstring""" # Base Case if curr_ind == len(__lowerCamelCase ): # return whether path exists between current and starting vertices return graph[path[curr_ind - 1]][path[0]] == 1 # Recursive Step for next_ver in range(0 , len(__lowerCamelCase ) ): if valid_connection(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ): # Insert current vertex into path as next transition lowerCamelCase__ : Tuple =next_ver # Validate created path if util_hamilton_cycle(__lowerCamelCase , __lowerCamelCase , curr_ind + 1 ): return True # Backtrack lowerCamelCase__ : int =-1 return False def snake_case__ ( __lowerCamelCase : list[list[int]] , __lowerCamelCase : int = 0 ): """simple docstring""" lowerCamelCase__ : Tuple =[-1] * (len(__lowerCamelCase ) + 1) # initialize start and end of path with starting index lowerCamelCase__ : Union[str, Any] =start_index # evaluate and if we find answer return path either return empty array return path if util_hamilton_cycle(__lowerCamelCase , __lowerCamelCase , 1 ) else []
625
1
"""simple docstring""" import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel _lowercase : List[str] = logging.getLogger(__name__) def snake_case__ ( __lowerCamelCase : Any , __lowerCamelCase : str ): """simple docstring""" # save results if os.path.exists(__lowerCamelCase ): if os.path.exists(os.path.join(__lowerCamelCase , '''config.json''' ) ) and os.path.isfile( os.path.join(__lowerCamelCase , '''config.json''' ) ): os.remove(os.path.join(__lowerCamelCase , '''config.json''' ) ) if os.path.exists(os.path.join(__lowerCamelCase , '''pytorch_model.bin''' ) ) and os.path.isfile( os.path.join(__lowerCamelCase , '''pytorch_model.bin''' ) ): os.remove(os.path.join(__lowerCamelCase , '''pytorch_model.bin''' ) ) else: os.makedirs(__lowerCamelCase ) model.save_pretrained(__lowerCamelCase ) def snake_case__ ( __lowerCamelCase : Tuple , __lowerCamelCase : Dict=False ): """simple docstring""" lowerCamelCase__ : Union[str, Any] =2 if unlogit: lowerCamelCase__ : Any =torch.pow(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : List[str] =p * torch.log(__lowerCamelCase ) lowerCamelCase__ : Tuple =0 return -plogp.sum(dim=-1 ) def snake_case__ ( __lowerCamelCase : Any ): """simple docstring""" logger.info('''lv, h >\t''' + '''\t'''.join(f'''{x + 1}''' for x in range(len(__lowerCamelCase ) ) ) ) for row in range(len(__lowerCamelCase ) ): if tensor.dtype != torch.long: logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) ) else: logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:d}''' for x in tensor[row].cpu().data ) ) def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : 
List[str]=None , __lowerCamelCase : Tuple=False ): """simple docstring""" lowerCamelCase__ , lowerCamelCase__ : Tuple =model.config.num_hidden_layers, model.config.num_attention_heads lowerCamelCase__ : Optional[Any] =torch.zeros(__lowerCamelCase , __lowerCamelCase ).to(args.device ) lowerCamelCase__ : Optional[Any] =torch.zeros(__lowerCamelCase , __lowerCamelCase ).to(args.device ) if head_mask is None: lowerCamelCase__ : List[Any] =torch.ones(__lowerCamelCase , __lowerCamelCase ).to(args.device ) head_mask.requires_grad_(requires_grad=__lowerCamelCase ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: lowerCamelCase__ : Union[str, Any] =None lowerCamelCase__ : List[str] =0.0 lowerCamelCase__ : Union[str, Any] =0.0 for step, inputs in enumerate(tqdm(__lowerCamelCase , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ): lowerCamelCase__ : Any =tuple(t.to(args.device ) for t in inputs ) ((lowerCamelCase__) , ) : Any =inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) lowerCamelCase__ : Dict =model(__lowerCamelCase , labels=__lowerCamelCase , head_mask=__lowerCamelCase ) # (loss), lm_logits, presents, (all hidden_states), (attentions) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple =( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(__lowerCamelCase ): lowerCamelCase__ : Any =entropy(attn.detach() , __lowerCamelCase ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(__lowerCamelCase ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise 
importance normalization if not args.dont_normalize_importance_by_layer: lowerCamelCase__ : int =2 lowerCamelCase__ : List[str] =torch.pow(torch.pow(__lowerCamelCase , __lowerCamelCase ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-2_0 if not args.dont_normalize_global_importance: lowerCamelCase__ : int =(head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info('''Attention entropies''' ) print_ad_tensor(__lowerCamelCase ) if compute_importance: logger.info('''Head importance scores''' ) print_ad_tensor(__lowerCamelCase ) logger.info('''Head ranked by importance scores''' ) lowerCamelCase__ : Optional[int] =torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) lowerCamelCase__ : Dict =torch.arange( head_importance.numel() , device=args.device ) lowerCamelCase__ : Any =head_ranks.view_as(__lowerCamelCase ) print_ad_tensor(__lowerCamelCase ) return attn_entropy, head_importance, total_loss def snake_case__ ( __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : int ): """simple docstring""" lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict =compute_heads_importance(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase ) lowerCamelCase__ : int =1 / loss # instead of downsteam score use the LM loss logger.info('''Pruning: original score: %f, threshold: %f''' , __lowerCamelCase , original_score * args.masking_threshold ) lowerCamelCase__ : Dict =torch.ones_like(__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] =max(1 , int(new_head_mask.numel() * args.masking_amount ) ) lowerCamelCase__ : List[Any] =original_score while current_score >= original_score * args.masking_threshold: lowerCamelCase__ : List[Any] =new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads lowerCamelCase__ : int 
=float('''Inf''' ) lowerCamelCase__ : Union[str, Any] =head_importance.view(-1 ).sort()[1] if len(__lowerCamelCase ) <= num_to_mask: print('''BREAK BY num_to_mask''' ) break # mask heads lowerCamelCase__ : List[str] =current_heads_to_mask[:num_to_mask] logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) ) lowerCamelCase__ : Optional[int] =new_head_mask.view(-1 ) lowerCamelCase__ : Optional[Any] =0.0 lowerCamelCase__ : Dict =new_head_mask.view_as(__lowerCamelCase ) lowerCamelCase__ : Tuple =new_head_mask.clone().detach() print_ad_tensor(__lowerCamelCase ) # Compute metric and head importance again lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =compute_heads_importance( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase , head_mask=__lowerCamelCase ) lowerCamelCase__ : Any =1 / loss logger.info( '''Masking: current score: %f, remaining heads %d (%.1f percents)''' , __lowerCamelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , ) logger.info('''Final head mask''' ) print_ad_tensor(__lowerCamelCase ) np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() ) return head_mask def snake_case__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[Any] ): """simple docstring""" lowerCamelCase__ : str =datetime.now() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] =compute_heads_importance( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase , compute_importance=__lowerCamelCase , head_mask=__lowerCamelCase ) lowerCamelCase__ : Tuple =1 / loss lowerCamelCase__ : Optional[Any] =datetime.now() - before_time lowerCamelCase__ : int =sum(p.numel() for p in model.parameters() ) lowerCamelCase__ : Any ={ layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in 
range(len(__lowerCamelCase ) ) } for k, v in heads_to_prune.items(): if isinstance(__lowerCamelCase , __lowerCamelCase ): lowerCamelCase__ : Optional[int] =[ v, ] assert sum(len(__lowerCamelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(__lowerCamelCase ) lowerCamelCase__ : List[str] =sum(p.numel() for p in model.parameters() ) lowerCamelCase__ : Any =datetime.now() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict =compute_heads_importance( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase , compute_importance=__lowerCamelCase , head_mask=__lowerCamelCase , actually_pruned=__lowerCamelCase , ) lowerCamelCase__ : str =1 / loss lowerCamelCase__ : Union[str, Any] =datetime.now() - before_time logger.info( '''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , __lowerCamelCase , __lowerCamelCase , pruned_num_params / original_num_params * 100 , ) logger.info('''Pruning: score with masking: %f score with pruning: %f''' , __lowerCamelCase , __lowerCamelCase ) logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 100 ) save_model(__lowerCamelCase , args.output_dir ) def snake_case__ ( ): """simple docstring""" lowerCamelCase__ : Optional[int] =argparse.ArgumentParser() # Required parameters parser.add_argument( '''--data_dir''' , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help='''The input data dir. 
Should contain the .tsv files (or other data files) for the task.''' , ) parser.add_argument( '''--model_name_or_path''' , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--output_dir''' , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help='''The output directory where the model predictions and checkpoints will be written.''' , ) # Other parameters parser.add_argument( '''--config_name''' , default='''''' , type=__lowerCamelCase , help='''Pretrained config name or path if not the same as model_name_or_path''' , ) parser.add_argument( '''--tokenizer_name''' , default='''''' , type=__lowerCamelCase , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , ) parser.add_argument( '''--cache_dir''' , default=__lowerCamelCase , type=__lowerCamelCase , help='''Where do you want to store the pre-trained models downloaded from s3''' , ) parser.add_argument( '''--data_subset''' , type=__lowerCamelCase , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' ) parser.add_argument( '''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' ) parser.add_argument( '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' ) parser.add_argument( '''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' ) parser.add_argument( '''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , ) parser.add_argument( '''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' ) parser.add_argument( '''--masking_threshold''' , default=0.9 , 
type=__lowerCamelCase , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , ) parser.add_argument( '''--masking_amount''' , default=0.1 , type=__lowerCamelCase , help='''Amount to heads to masking at each masking step.''' ) parser.add_argument('''--metric_name''' , default='''acc''' , type=__lowerCamelCase , help='''Metric to use for head masking.''' ) parser.add_argument( '''--max_seq_length''' , default=128 , type=__lowerCamelCase , help=( '''The maximum total input sequence length after WordPiece tokenization. \n''' '''Sequences longer than this will be truncated, sequences shorter padded.''' ) , ) parser.add_argument('''--batch_size''' , default=1 , type=__lowerCamelCase , help='''Batch size.''' ) parser.add_argument('''--seed''' , type=__lowerCamelCase , default=42 ) parser.add_argument('''--local_rank''' , type=__lowerCamelCase , default=-1 , help='''local_rank for distributed training on gpus''' ) parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' ) parser.add_argument('''--server_ip''' , type=__lowerCamelCase , default='''''' , help='''Can be used for distant debugging.''' ) parser.add_argument('''--server_port''' , type=__lowerCamelCase , default='''''' , help='''Can be used for distant debugging.''' ) lowerCamelCase__ : List[Any] =parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('''Waiting for debugger attach''' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowerCamelCase ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: lowerCamelCase__ : Dict =torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' ) lowerCamelCase__ : Dict =0 if args.no_cuda else 
torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) lowerCamelCase__ : str =torch.device('''cuda''' , args.local_rank ) lowerCamelCase__ : Any =1 torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) lowerCamelCase__ : Union[str, Any] =GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: lowerCamelCase__ : List[Any] =nn.parallel.DistributedDataParallel( __lowerCamelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__lowerCamelCase ) elif args.n_gpu > 1: lowerCamelCase__ : int =nn.DataParallel(__lowerCamelCase ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=__lowerCamelCase ) torch.save(__lowerCamelCase , os.path.join(args.output_dir , '''run_args.bin''' ) ) logger.info('''Training/evaluation parameters %s''' , __lowerCamelCase ) # Prepare dataset lowerCamelCase__ : Union[str, Any] =np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) lowerCamelCase__ : Any =(torch.from_numpy(__lowerCamelCase ),) lowerCamelCase__ : List[Any] =TensorDataset(*__lowerCamelCase ) lowerCamelCase__ : List[str] =RandomSampler(__lowerCamelCase ) lowerCamelCase__ : Dict =DataLoader(__lowerCamelCase , sampler=__lowerCamelCase , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: lowerCamelCase__ : 
Optional[int] =mask_heads(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) prune_heads(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if __name__ == "__main__": main()
625
"""simple docstring""" import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin _lowercase : List[str] = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right _lowercase : List[str] = 2_5_0_0_0_4 _lowercase : Optional[Any] = 2_5_0_0_2_0 @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' _a = MBartTokenizer _a = MBartTokenizerFast _a = True _a = True def snake_case ( self : Tuple )-> Union[str, Any]: super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase__ : Union[str, Any] =MBartTokenizer(lowerCamelCase, keep_accents=lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case ( self : Dict )-> Union[str, Any]: lowerCamelCase__ : Any =MBartTokenizer(lowerCamelCase, keep_accents=lowerCamelCase ) lowerCamelCase__ : List[Any] =tokenizer.tokenize('''This is a test''' ) self.assertListEqual(lowerCamelCase, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCamelCase ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], ) lowerCamelCase__ : str =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowerCamelCase, [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', 
'''al''', '''s''', '''é''', '''.''', ], ) lowerCamelCase__ : Union[str, Any] =tokenizer.convert_tokens_to_ids(lowerCamelCase ) self.assertListEqual( lowerCamelCase, [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ], ) lowerCamelCase__ : str =tokenizer.convert_ids_to_tokens(lowerCamelCase ) self.assertListEqual( lowerCamelCase, [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ], ) def snake_case ( self : Tuple )-> List[Any]: if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return lowerCamelCase__ : int =(self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowerCamelCase__ : Union[str, Any] =self.rust_tokenizer_class.from_pretrained(lowerCamelCase, **lowerCamelCase ) lowerCamelCase__ : str =self.tokenizer_class.from_pretrained(lowerCamelCase, **lowerCamelCase ) lowerCamelCase__ : List[str] =tempfile.mkdtemp() lowerCamelCase__ : Union[str, Any] =tokenizer_r.save_pretrained(lowerCamelCase ) lowerCamelCase__ : Optional[int] =tokenizer_p.save_pretrained(lowerCamelCase ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) lowerCamelCase__ : List[str] =tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(lowerCamelCase, lowerCamelCase ) # Checks 
everything loads correctly in the same way lowerCamelCase__ : Any =tokenizer_r.from_pretrained(lowerCamelCase ) lowerCamelCase__ : Dict =tokenizer_p.from_pretrained(lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(lowerCamelCase ) # Save tokenizer rust, legacy_format=True lowerCamelCase__ : Dict =tempfile.mkdtemp() lowerCamelCase__ : List[str] =tokenizer_r.save_pretrained(lowerCamelCase, legacy_format=lowerCamelCase ) lowerCamelCase__ : Tuple =tokenizer_p.save_pretrained(lowerCamelCase ) # Checks it save with the same files self.assertSequenceEqual(lowerCamelCase, lowerCamelCase ) # Checks everything loads correctly in the same way lowerCamelCase__ : Optional[int] =tokenizer_r.from_pretrained(lowerCamelCase ) lowerCamelCase__ : Any =tokenizer_p.from_pretrained(lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) ) shutil.rmtree(lowerCamelCase ) # Save tokenizer rust, legacy_format=False lowerCamelCase__ : Optional[int] =tempfile.mkdtemp() lowerCamelCase__ : int =tokenizer_r.save_pretrained(lowerCamelCase, legacy_format=lowerCamelCase ) lowerCamelCase__ : Dict =tokenizer_p.save_pretrained(lowerCamelCase ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way lowerCamelCase__ : Dict =tokenizer_r.from_pretrained(lowerCamelCase ) lowerCamelCase__ : int =tokenizer_p.from_pretrained(lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: 
self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) ) shutil.rmtree(lowerCamelCase ) @require_torch @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' _a = 'facebook/mbart-large-en-ro' _a = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.', ] _a = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei' ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] _a = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE] @classmethod def snake_case ( cls : List[Any] )-> Optional[int]: lowerCamelCase__ : MBartTokenizer =MBartTokenizer.from_pretrained( cls.checkpoint_name, src_lang='''en_XX''', tgt_lang='''ro_RO''' ) lowerCamelCase__ : Optional[int] =1 return cls def snake_case ( self : Optional[Any] )-> List[str]: self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''], 25_0001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''], 25_0004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''], 25_0020 ) def snake_case ( self : Optional[int] )-> List[Any]: lowerCamelCase__ : Union[str, Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens, lowerCamelCase ) def snake_case ( self : Optional[Any] )-> str: self.assertIn(lowerCamelCase, self.tokenizer.all_special_ids ) lowerCamelCase__ : Optional[int] =[RO_CODE, 884, 9019, 96, 9, 916, 
8_6792, 36, 1_8743, 1_5596, 5, 2] lowerCamelCase__ : Any =self.tokenizer.decode(lowerCamelCase, skip_special_tokens=lowerCamelCase ) lowerCamelCase__ : str =self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowerCamelCase ) self.assertEqual(lowerCamelCase, lowerCamelCase ) self.assertNotIn(self.tokenizer.eos_token, lowerCamelCase ) def snake_case ( self : Tuple )-> int: lowerCamelCase__ : Optional[int] =['''this is gunna be a long sentence ''' * 20] assert isinstance(src_text[0], lowerCamelCase ) lowerCamelCase__ : Dict =10 lowerCamelCase__ : Optional[int] =self.tokenizer(lowerCamelCase, max_length=lowerCamelCase, truncation=lowerCamelCase ).input_ids[0] self.assertEqual(ids[-2], 2 ) self.assertEqual(ids[-1], lowerCamelCase ) self.assertEqual(len(lowerCamelCase ), lowerCamelCase ) def snake_case ( self : int )-> Any: self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ), [25_0026, 25_0001] ) def snake_case ( self : Tuple )-> Optional[Any]: lowerCamelCase__ : int =tempfile.mkdtemp() lowerCamelCase__ : Optional[int] =self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(lowerCamelCase ) lowerCamelCase__ : Optional[Any] =MBartTokenizer.from_pretrained(lowerCamelCase ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids, lowerCamelCase ) @require_torch def snake_case ( self : Optional[Any] )-> Tuple: lowerCamelCase__ : Optional[Any] =self.tokenizer(self.src_text, text_target=self.tgt_text, padding=lowerCamelCase, return_tensors='''pt''' ) lowerCamelCase__ : Dict =shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def snake_case ( self : Optional[Any] )-> Any: lowerCamelCase__ : str 
=self.tokenizer( self.src_text, text_target=self.tgt_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=len(self.expected_src_tokens ), return_tensors='''pt''', ) lowerCamelCase__ : List[Any] =shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id ) self.assertIsInstance(lowerCamelCase, lowerCamelCase ) self.assertEqual((2, 14), batch.input_ids.shape ) self.assertEqual((2, 14), batch.attention_mask.shape ) lowerCamelCase__ : Any =batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens, lowerCamelCase ) self.assertEqual(2, batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens, [] ) self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE] ) def snake_case ( self : List[Any] )-> Dict: lowerCamelCase__ : Any =self.tokenizer(self.src_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=3, return_tensors='''pt''' ) lowerCamelCase__ : Tuple =self.tokenizer( text_target=self.tgt_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=10, return_tensors='''pt''' ) lowerCamelCase__ : Union[str, Any] =targets['''input_ids'''] lowerCamelCase__ : List[Any] =shift_tokens_right(lowerCamelCase, self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1], 3 ) self.assertEqual(batch.decoder_input_ids.shape[1], 10 ) @require_torch def snake_case ( self : Optional[int] )-> List[Any]: lowerCamelCase__ : str =self.tokenizer._build_translation_inputs( '''A test''', return_tensors='''pt''', src_lang='''en_XX''', tgt_lang='''ar_AR''' ) self.assertEqual( nested_simplify(lowerCamelCase ), { # A, test, EOS, en_XX '''input_ids''': [[62, 3034, 2, 25_0004]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 25_0001, }, )
625
1
"""simple docstring""" import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCasePlus, slow from utils import ROUGE_KEYS logging.basicConfig(level=logging.DEBUG) _lowercase : Any = logging.getLogger() def snake_case__ ( __lowerCamelCase : Path , __lowerCamelCase : list ): """simple docstring""" lowerCamelCase__ : List[str] ='''\n'''.join(__lowerCamelCase ) Path(__lowerCamelCase ).open('''w''' ).writelines(__lowerCamelCase ) _lowercase : List[Any] = "patrickvonplaten/t5-tiny-random" _lowercase : Optional[Any] = "sshleifer/bart-tiny-random" _lowercase : Tuple = "sshleifer/tiny-mbart" _lowercase : List[Any] = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' def snake_case ( self : str, lowerCamelCase : Union[str, Any] )-> List[str]: lowerCamelCase__ : Dict =Path(self.get_auto_remove_tmp_dir() ) / '''utest_input.source''' lowerCamelCase__ : int =input_file_name.parent / '''utest_output.txt''' assert not output_file_name.exists() lowerCamelCase__ : int =[''' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.'''] _dump_articles(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : int =str(Path(self.get_auto_remove_tmp_dir() ) / '''scores.json''' ) lowerCamelCase__ : int ='''translation_en_to_de''' if model == T5_TINY else '''summarization''' lowerCamelCase__ : Optional[int] =F''' run_eval_search.py {model} {input_file_name} {output_file_name} --score_path {score_path} --task {task} --num_beams 2 --length_penalty 2.0 '''.split() with patch.object(lowerCamelCase, '''argv''', lowerCamelCase ): run_generate() assert Path(lowerCamelCase ).exists() # 
os.remove(Path(output_file_name)) def snake_case ( self : Dict )-> Dict: self.run_eval_tester(lowerCamelCase ) @parameterized.expand([BART_TINY, MBART_TINY] ) @slow def snake_case ( self : List[str], lowerCamelCase : Tuple )-> Tuple: self.run_eval_tester(lowerCamelCase ) @parameterized.expand([T5_TINY, MBART_TINY] ) @slow def snake_case ( self : Union[str, Any], lowerCamelCase : Any )-> Optional[int]: lowerCamelCase__ : int =Path(self.get_auto_remove_tmp_dir() ) / '''utest_input.source''' lowerCamelCase__ : Any =input_file_name.parent / '''utest_output.txt''' assert not output_file_name.exists() lowerCamelCase__ : Any ={ '''en''': ['''Machine learning is great, isn\'t it?''', '''I like to eat bananas''', '''Tomorrow is another great day!'''], '''de''': [ '''Maschinelles Lernen ist großartig, oder?''', '''Ich esse gerne Bananen''', '''Morgen ist wieder ein toller Tag!''', ], } lowerCamelCase__ : Optional[int] =Path(self.get_auto_remove_tmp_dir() ) lowerCamelCase__ : List[str] =str(tmp_dir / '''scores.json''' ) lowerCamelCase__ : str =str(tmp_dir / '''val.target''' ) _dump_articles(lowerCamelCase, text['''en'''] ) _dump_articles(lowerCamelCase, text['''de'''] ) lowerCamelCase__ : int ='''translation_en_to_de''' if model == T5_TINY else '''summarization''' lowerCamelCase__ : str =F''' run_eval_search.py {model} {str(lowerCamelCase )} {str(lowerCamelCase )} --score_path {score_path} --reference_path {reference_path} --task {task} '''.split() testargs.extend(['''--search''', '''num_beams=1:2 length_penalty=0.9:1.0'''] ) with patch.object(lowerCamelCase, '''argv''', lowerCamelCase ): with CaptureStdout() as cs: run_search() lowerCamelCase__ : Dict =[''' num_beams | length_penalty''', model, '''Best score args'''] lowerCamelCase__ : Optional[Any] =['''Info'''] if "translation" in task: expected_strings.append('''bleu''' ) else: expected_strings.extend(lowerCamelCase ) for w in expected_strings: assert w in cs.out for w in un_expected_strings: assert w not in cs.out assert 
Path(lowerCamelCase ).exists() os.remove(Path(lowerCamelCase ) )
625
"""simple docstring""" def snake_case__ ( __lowerCamelCase : str ): """simple docstring""" return " ".join( ''''''.join(word[::-1] ) if len(__lowerCamelCase ) > 4 else word for word in sentence.split() ) if __name__ == "__main__": import doctest doctest.testmod() print(reverse_long_words("Hey wollef sroirraw"))
625
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _lowercase : List[Any] = { "configuration_groupvit": [ "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GroupViTConfig", "GroupViTOnnxConfig", "GroupViTTextConfig", "GroupViTVisionConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Union[str, Any] = [ "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "GroupViTModel", "GroupViTPreTrainedModel", "GroupViTTextModel", "GroupViTVisionModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Optional[Any] = [ "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFGroupViTModel", "TFGroupViTPreTrainedModel", "TFGroupViTTextModel", "TFGroupViTVisionModel", ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys _lowercase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
625
"""simple docstring""" def snake_case__ ( __lowerCamelCase : int = 10 , __lowerCamelCase : int = 22 ): """simple docstring""" lowerCamelCase__ : Optional[Any] =range(1 , __lowerCamelCase ) lowerCamelCase__ : str =range(1 , __lowerCamelCase ) return sum( 1 for power in powers for base in bases if len(str(base**power ) ) == power ) if __name__ == "__main__": print(f'{solution(1_0, 2_2) = }')
625
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _lowercase : Any = { "configuration_mobilenet_v2": [ "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileNetV2Config", "MobileNetV2OnnxConfig", ], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Optional[Any] = ["MobileNetV2FeatureExtractor"] _lowercase : Any = ["MobileNetV2ImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Tuple = [ "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST", "MobileNetV2ForImageClassification", "MobileNetV2ForSemanticSegmentation", "MobileNetV2Model", "MobileNetV2PreTrainedModel", "load_tf_weights_in_mobilenet_v2", ] if TYPE_CHECKING: from .configuration_mobilenet_va import ( MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileNetVaConfig, MobileNetVaOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor from .image_processing_mobilenet_va import MobileNetVaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilenet_va import ( MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel, MobileNetVaPreTrainedModel, load_tf_weights_in_mobilenet_va, ) else: import sys _lowercase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
625
"""simple docstring""" import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def snake_case__ ( __lowerCamelCase : List[Any] ): """simple docstring""" if isinstance(__lowerCamelCase , collections.abc.Iterable ): return x return (x, x) @require_flax class __SCREAMING_SNAKE_CASE : '''simple docstring''' def snake_case ( self : Dict, lowerCamelCase : List[str], lowerCamelCase : Any )-> Union[str, Any]: pass def snake_case ( self : List[str] )-> List[str]: pass def snake_case ( self : Optional[Any] )-> str: pass def snake_case ( self : Union[str, Any], lowerCamelCase : np.ndarray, lowerCamelCase : np.ndarray, lowerCamelCase : float )-> Dict: lowerCamelCase__ : Union[str, Any] =np.abs((a - b) ).max() self.assertLessEqual(lowerCamelCase, lowerCamelCase, F'''Difference between torch and flax is {diff} (>= {tol}).''' ) def snake_case ( self : Dict, lowerCamelCase : Tuple, lowerCamelCase : Any, lowerCamelCase : List[str], lowerCamelCase : Dict, lowerCamelCase : Any=None, **lowerCamelCase : str )-> int: 
lowerCamelCase__ : List[str] =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : Tuple =FlaxVisionTextDualEncoderModel(lowerCamelCase ) lowerCamelCase__ : Dict =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase ) self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], config.projection_dim) ) def snake_case ( self : Any, lowerCamelCase : int, lowerCamelCase : Optional[Any], lowerCamelCase : List[str], lowerCamelCase : Union[str, Any], lowerCamelCase : str=None, **lowerCamelCase : List[Any] )-> int: lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : Union[str, Any] ={'''vision_model''': vision_model, '''text_model''': text_model} lowerCamelCase__ : Tuple =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase ) lowerCamelCase__ : Optional[int] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase ) self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], model.config.projection_dim) ) def snake_case ( self : Any, lowerCamelCase : Dict, lowerCamelCase : Dict, lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Dict=None, **lowerCamelCase : int )-> List[str]: lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : Optional[int] ={'''vision_model''': vision_model, '''text_model''': text_model} lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase ) lowerCamelCase__ : List[Any] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, 
attention_mask=lowerCamelCase ) lowerCamelCase__ : int =output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCamelCase ) lowerCamelCase__ : Dict =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase ) lowerCamelCase__ : Optional[int] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase ) lowerCamelCase__ : List[str] =after_output[0] lowerCamelCase__ : Any =np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCamelCase, 1E-3 ) def snake_case ( self : Optional[Any], lowerCamelCase : Dict, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : List[Any]=None, **lowerCamelCase : List[Any] )-> Tuple: lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : Any ={'''vision_model''': vision_model, '''text_model''': text_model} lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase ) lowerCamelCase__ : List[str] =model( input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase, output_attentions=lowerCamelCase ) lowerCamelCase__ : int =output.vision_model_output.attentions self.assertEqual(len(lowerCamelCase ), vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) lowerCamelCase__ : Tuple =to_atuple(vision_model.config.image_size ) lowerCamelCase__ : Optional[Any] =to_atuple(vision_model.config.patch_size ) lowerCamelCase__ : Union[str, Any] =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) lowerCamelCase__ : int =num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len) ) lowerCamelCase__ : List[Any] =output.text_model_output.attentions self.assertEqual(len(lowerCamelCase ), text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:], 
(text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), ) def snake_case ( self : Tuple, lowerCamelCase : Optional[int], lowerCamelCase : Any, lowerCamelCase : Union[str, Any] )-> Any: pt_model.to(lowerCamelCase ) pt_model.eval() # prepare inputs lowerCamelCase__ : Any =inputs_dict lowerCamelCase__ : Any ={k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): lowerCamelCase__ : List[str] =pt_model(**lowerCamelCase ).to_tuple() lowerCamelCase__ : Optional[Any] =fx_model(**lowerCamelCase ).to_tuple() self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4] ): self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowerCamelCase ) lowerCamelCase__ : Optional[int] =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_pt=lowerCamelCase ) lowerCamelCase__ : List[Any] =fx_model_loaded(**lowerCamelCase ).to_tuple() self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4] ): self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowerCamelCase ) lowerCamelCase__ : str =VisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_flax=lowerCamelCase ) pt_model_loaded.to(lowerCamelCase ) pt_model_loaded.eval() with torch.no_grad(): lowerCamelCase__ : List[Any] =pt_model_loaded(**lowerCamelCase ).to_tuple() self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4] ): self.assert_almost_equals(lowerCamelCase, 
pt_output_loaded.numpy(), 4E-2 ) def snake_case ( self : str, lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[Any], lowerCamelCase : str )-> List[Any]: lowerCamelCase__ : Any =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : List[Any] =VisionTextDualEncoderModel(lowerCamelCase ) lowerCamelCase__ : List[str] =FlaxVisionTextDualEncoderModel(lowerCamelCase ) lowerCamelCase__ : str =convert_pytorch_state_dict_to_flax(pt_model.state_dict(), lowerCamelCase ) lowerCamelCase__ : Tuple =fx_state self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase ) def snake_case ( self : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Union[str, Any] )-> Optional[int]: lowerCamelCase__ : Dict =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : Tuple =VisionTextDualEncoderModel(lowerCamelCase ) lowerCamelCase__ : List[str] =FlaxVisionTextDualEncoderModel(lowerCamelCase ) lowerCamelCase__ : Tuple =load_flax_weights_in_pytorch_model(lowerCamelCase, fx_model.params ) self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase ) def snake_case ( self : Optional[int] )-> Union[str, Any]: lowerCamelCase__ : Any =self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**lowerCamelCase ) def snake_case ( self : Tuple )-> int: lowerCamelCase__ : int =self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**lowerCamelCase ) def snake_case ( self : Tuple )-> Any: lowerCamelCase__ : Tuple =self.prepare_config_and_inputs() self.check_save_load(**lowerCamelCase ) def snake_case ( self : str )-> Any: lowerCamelCase__ : str =self.prepare_config_and_inputs() self.check_vision_text_output_attention(**lowerCamelCase ) @is_pt_flax_cross_test def snake_case ( self : Tuple )-> List[Any]: lowerCamelCase__ : Union[str, Any] =self.prepare_config_and_inputs() 
lowerCamelCase__ : Union[str, Any] =config_inputs_dict.pop('''vision_config''' ) lowerCamelCase__ : Optional[Any] =config_inputs_dict.pop('''text_config''' ) lowerCamelCase__ : Tuple =config_inputs_dict self.check_equivalence_pt_to_flax(lowerCamelCase, lowerCamelCase, lowerCamelCase ) self.check_equivalence_flax_to_pt(lowerCamelCase, lowerCamelCase, lowerCamelCase ) @slow def snake_case ( self : Optional[Any] )-> Tuple: lowerCamelCase__ , lowerCamelCase__ : Dict =self.get_pretrained_model_and_inputs() lowerCamelCase__ : Optional[int] =model_a(**lowerCamelCase ) lowerCamelCase__ : List[str] =outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(lowerCamelCase ) lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase ) lowerCamelCase__ : Union[str, Any] =model_a(**lowerCamelCase ) lowerCamelCase__ : List[Any] =after_outputs[0] lowerCamelCase__ : Any =np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCamelCase, 1E-5 ) @require_flax class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' def snake_case ( self : Optional[int] )-> Optional[Any]: lowerCamelCase__ : str =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( '''hf-internal-testing/tiny-random-vit''', '''hf-internal-testing/tiny-bert''', vision_from_pt=lowerCamelCase, text_from_pt=lowerCamelCase, ) lowerCamelCase__ : Union[str, Any] =13 lowerCamelCase__ : List[str] =floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) lowerCamelCase__ : List[str] =ids_tensor([batch_size, 4], model.config.text_config.vocab_size ) lowerCamelCase__ : Optional[int] =random_attention_mask([batch_size, 4] ) lowerCamelCase__ : Any ={'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def snake_case ( self : str, lowerCamelCase : str, lowerCamelCase : int 
)-> int: lowerCamelCase__ : str =FlaxViTModel(lowerCamelCase ) lowerCamelCase__ : Any =FlaxBertModel(lowerCamelCase ) return vision_model, text_model def snake_case ( self : int )-> Optional[int]: lowerCamelCase__ : Any =FlaxViTModelTester(self ) lowerCamelCase__ : Union[str, Any] =FlaxBertModelTester(self ) lowerCamelCase__ : Any =vit_model_tester.prepare_config_and_inputs() lowerCamelCase__ : Optional[Any] =bert_model_tester.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ : Any =vision_config_and_inputs lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple =text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' def snake_case ( self : Optional[int] )-> Optional[int]: lowerCamelCase__ : Union[str, Any] =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( '''hf-internal-testing/tiny-random-clip''', '''hf-internal-testing/tiny-bert''', vision_from_pt=lowerCamelCase, text_from_pt=lowerCamelCase, ) lowerCamelCase__ : Union[str, Any] =13 lowerCamelCase__ : Optional[Any] =floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) lowerCamelCase__ : Union[str, Any] =ids_tensor([batch_size, 4], model.config.text_config.vocab_size ) lowerCamelCase__ : str =random_attention_mask([batch_size, 4] ) lowerCamelCase__ : Optional[int] ={'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def snake_case ( self : List[str], lowerCamelCase : Any, lowerCamelCase : Dict )-> Dict: lowerCamelCase__ : str =FlaxCLIPVisionModel(lowerCamelCase ) lowerCamelCase__ : 
Optional[Any] =FlaxBertModel(lowerCamelCase ) return vision_model, text_model def snake_case ( self : Optional[int] )-> Optional[Any]: lowerCamelCase__ : List[Any] =FlaxCLIPVisionModelTester(self ) lowerCamelCase__ : List[Any] =FlaxBertModelTester(self ) lowerCamelCase__ : Any =clip_model_tester.prepare_config_and_inputs() lowerCamelCase__ : Optional[int] =bert_model_tester.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ : List[Any] =vision_config_and_inputs lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] =text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @slow def snake_case ( self : Tuple )-> Optional[Any]: lowerCamelCase__ : Any =FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''', logit_scale_init_value=1.0 ) lowerCamelCase__ : List[Any] =VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' ) lowerCamelCase__ : Optional[int] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCamelCase__ : Dict =processor( text=['''una foto di un gatto''', '''una foto di un cane'''], images=lowerCamelCase, padding=lowerCamelCase, return_tensors='''np''' ) lowerCamelCase__ : List[Any] =model(**lowerCamelCase ) # verify the logits self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), ) lowerCamelCase__ : Any =np.array([[1.2_284_727, 0.3_104_122]] ) self.assertTrue(np.allclose(outputs.logits_per_image, lowerCamelCase, atol=1E-3 ) )
625
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import _LazyModule _lowercase : Dict = {"tokenization_bertweet": ["BertweetTokenizer"]} if TYPE_CHECKING: from .tokenization_bertweet import BertweetTokenizer else: import sys _lowercase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
625
"""simple docstring""" def snake_case__ ( __lowerCamelCase : list , __lowerCamelCase : list , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int ): """simple docstring""" if index == number_of_items: return 0 lowerCamelCase__ : Optional[int] =0 lowerCamelCase__ : Union[str, Any] =0 lowerCamelCase__ : List[str] =knapsack(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , index + 1 ) if weights[index] <= max_weight: lowerCamelCase__ : Dict =values[index] + knapsack( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , max_weight - weights[index] , index + 1 ) return max(__lowerCamelCase , __lowerCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
625
1
"""simple docstring""" from math import atan, cos, radians, sin, tan from .haversine_distance import haversine_distance _lowercase : Optional[int] = 6_378_137.0 _lowercase : Any = 6_356_752.314_245 _lowercase : Any = 6_3_7_8_1_3_7 def snake_case__ ( __lowerCamelCase : float , __lowerCamelCase : float , __lowerCamelCase : float , __lowerCamelCase : float ): """simple docstring""" lowerCamelCase__ : Union[str, Any] =(AXIS_A - AXIS_B) / AXIS_A # Parametric latitudes # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude lowerCamelCase__ : str =atan((1 - flattening) * tan(radians(__lowerCamelCase ) ) ) lowerCamelCase__ : List[Any] =atan((1 - flattening) * tan(radians(__lowerCamelCase ) ) ) # Compute central angle between two points # using haversine theta. sigma = haversine_distance / equatorial radius lowerCamelCase__ : List[str] =haversine_distance(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) / EQUATORIAL_RADIUS # Intermediate P and Q values lowerCamelCase__ : Any =(b_lata + b_lata) / 2 lowerCamelCase__ : List[Any] =(b_lata - b_lata) / 2 # Intermediate X value # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2) lowerCamelCase__ : List[Any] =(sin(__lowerCamelCase ) ** 2) * (cos(__lowerCamelCase ) ** 2) lowerCamelCase__ : Any =cos(sigma / 2 ) ** 2 lowerCamelCase__ : List[str] =(sigma - sin(__lowerCamelCase )) * (x_numerator / x_demonimator) # Intermediate Y value # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2) lowerCamelCase__ : List[str] =(cos(__lowerCamelCase ) ** 2) * (sin(__lowerCamelCase ) ** 2) lowerCamelCase__ : Dict =sin(sigma / 2 ) ** 2 lowerCamelCase__ : List[str] =(sigma + sin(__lowerCamelCase )) * (y_numerator / y_denominator) return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value))) if __name__ == "__main__": import doctest doctest.testmod()
625
"""simple docstring""" _lowercase : Optional[Any] = { "Pillow": "Pillow<10.0.0", "accelerate": "accelerate>=0.20.3", "av": "av==9.2.0", "beautifulsoup4": "beautifulsoup4", "black": "black~=23.1", "codecarbon": "codecarbon==1.2.0", "cookiecutter": "cookiecutter==1.7.3", "dataclasses": "dataclasses", "datasets": "datasets!=2.5.0", "decord": "decord==0.6.0", "deepspeed": "deepspeed>=0.9.3", "diffusers": "diffusers", "dill": "dill<0.3.5", "evaluate": "evaluate>=0.2.0", "fairscale": "fairscale>0.3", "faiss-cpu": "faiss-cpu", "fastapi": "fastapi", "filelock": "filelock", "flax": "flax>=0.4.1,<=0.7.0", "ftfy": "ftfy", "fugashi": "fugashi>=1.0", "GitPython": "GitPython<3.1.19", "hf-doc-builder": "hf-doc-builder>=0.3.0", "huggingface-hub": "huggingface-hub>=0.14.1,<1.0", "importlib_metadata": "importlib_metadata", "ipadic": "ipadic>=1.0.0,<2.0", "isort": "isort>=5.5.4", "jax": "jax>=0.2.8,!=0.3.2,<=0.4.13", "jaxlib": "jaxlib>=0.1.65,<=0.4.13", "jieba": "jieba", "kenlm": "kenlm", "keras-nlp": "keras-nlp>=0.3.1", "librosa": "librosa", "nltk": "nltk", "natten": "natten>=0.14.6", "numpy": "numpy>=1.17", "onnxconverter-common": "onnxconverter-common", "onnxruntime-tools": "onnxruntime-tools>=1.4.2", "onnxruntime": "onnxruntime>=1.4.0", "opencv-python": "opencv-python", "optuna": "optuna", "optax": "optax>=0.0.8,<=0.1.4", "packaging": "packaging>=20.0", "parameterized": "parameterized", "phonemizer": "phonemizer", "protobuf": "protobuf", "psutil": "psutil", "pyyaml": "pyyaml>=5.1", "pydantic": "pydantic<2", "pytest": "pytest>=7.2.0", "pytest-timeout": "pytest-timeout", "pytest-xdist": "pytest-xdist", "python": "python>=3.8.0", "ray[tune]": "ray[tune]", "regex": "regex!=2019.12.17", "requests": "requests", "rhoknp": "rhoknp>=1.1.0,<1.3.1", "rjieba": "rjieba", "rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1", "ruff": "ruff>=0.0.241,<=0.0.259", "sacrebleu": "sacrebleu>=1.4.12,<2.0.0", "sacremoses": "sacremoses", "safetensors": "safetensors>=0.3.1", "sagemaker": 
"sagemaker>=2.31.0", "scikit-learn": "scikit-learn", "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92", "sigopt": "sigopt", "starlette": "starlette", "sudachipy": "sudachipy>=0.6.6", "sudachidict_core": "sudachidict_core>=20220729", "tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14", "tensorflow": "tensorflow>=2.6,<2.14", "tensorflow-text": "tensorflow-text<2.14", "tf2onnx": "tf2onnx", "timeout-decorator": "timeout-decorator", "timm": "timm", "tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14", "torch": "torch>=1.9,!=1.12.0", "torchaudio": "torchaudio", "torchvision": "torchvision", "pyctcdecode": "pyctcdecode>=0.4.0", "tqdm": "tqdm>=4.27", "unidic": "unidic>=1.0.2", "unidic_lite": "unidic_lite>=1.0.7", "urllib3": "urllib3<2.0.0", "uvicorn": "uvicorn", }
625
1
"""simple docstring""" import argparse import os import re _lowercase : str = "src/transformers/models/auto" # re pattern that matches mapping introductions: # SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict _lowercase : Optional[Any] = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict") # re pattern that matches identifiers in mappings _lowercase : int = re.compile(r"\s*\(\s*\"(\S[^\"]+)\"") def snake_case__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : bool = False ): """simple docstring""" with open(__lowerCamelCase , '''r''' , encoding='''utf-8''' ) as f: lowerCamelCase__ : Optional[Any] =f.read() lowerCamelCase__ : str =content.split('''\n''' ) lowerCamelCase__ : Optional[Any] =[] lowerCamelCase__ : List[Any] =0 while line_idx < len(__lowerCamelCase ): if _re_intro_mapping.search(lines[line_idx] ) is not None: lowerCamelCase__ : Optional[int] =len(re.search(R'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8 # Start of a new mapping! 
while not lines[line_idx].startswith(''' ''' * indent + '''(''' ): new_lines.append(lines[line_idx] ) line_idx += 1 lowerCamelCase__ : str =[] while lines[line_idx].strip() != "]": # Blocks either fit in one line or not if lines[line_idx].strip() == "(": lowerCamelCase__ : Optional[Any] =line_idx while not lines[line_idx].startswith(''' ''' * indent + ''')''' ): line_idx += 1 blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) ) else: blocks.append(lines[line_idx] ) line_idx += 1 # Sort blocks by their identifiers lowerCamelCase__ : Tuple =sorted(__lowerCamelCase , key=lambda __lowerCamelCase : _re_identifier.search(__lowerCamelCase ).groups()[0] ) new_lines += blocks else: new_lines.append(lines[line_idx] ) line_idx += 1 if overwrite: with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' ) as f: f.write('''\n'''.join(__lowerCamelCase ) ) elif "\n".join(__lowerCamelCase ) != content: return True def snake_case__ ( __lowerCamelCase : bool = False ): """simple docstring""" lowerCamelCase__ : Optional[int] =[os.path.join(__lowerCamelCase , __lowerCamelCase ) for f in os.listdir(__lowerCamelCase ) if f.endswith('''.py''' )] lowerCamelCase__ : Any =[sort_auto_mapping(__lowerCamelCase , overwrite=__lowerCamelCase ) for fname in fnames] if not overwrite and any(__lowerCamelCase ): lowerCamelCase__ : Union[str, Any] =[f for f, d in zip(__lowerCamelCase , __lowerCamelCase ) if d] raise ValueError( f'''The following files have auto mappings that need sorting: {", ".join(__lowerCamelCase )}. Run `make style` to fix''' ''' this.''' ) if __name__ == "__main__": _lowercase : Dict = argparse.ArgumentParser() parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.") _lowercase : Optional[Any] = parser.parse_args() sort_all_auto_mappings(not args.check_only)
625
"""simple docstring""" def snake_case__ ( __lowerCamelCase : list[int] ): """simple docstring""" if not numbers: return 0 if not isinstance(__lowerCamelCase , (list, tuple) ) or not all( isinstance(__lowerCamelCase , __lowerCamelCase ) for number in numbers ): raise ValueError('''numbers must be an iterable of integers''' ) lowerCamelCase__ : Any =numbers[0] for i in range(1 , len(__lowerCamelCase ) ): # update the maximum and minimum subarray products lowerCamelCase__ : Dict =numbers[i] if number < 0: lowerCamelCase__ , lowerCamelCase__ : List[Any] =min_till_now, max_till_now lowerCamelCase__ : Optional[int] =max(__lowerCamelCase , max_till_now * number ) lowerCamelCase__ : Dict =min(__lowerCamelCase , min_till_now * number ) # update the maximum product found till now lowerCamelCase__ : Tuple =max(__lowerCamelCase , __lowerCamelCase ) return max_prod
625
1
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from ... import AutoBackbone from ...modeling_outputs import SemanticSegmenterOutput from ...modeling_utils import PreTrainedModel from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings from ...utils.backbone_utils import BackboneMixin from .configuration_upernet import UperNetConfig _lowercase : List[str] = [ "openmmlab/upernet-convnext-tiny", # See all UperNet models at https://huggingface.co/models?filter=upernet ] # General docstring _lowercase : int = "UperNetConfig" class __SCREAMING_SNAKE_CASE ( nn.Module ): '''simple docstring''' def __init__( self : List[Any], lowerCamelCase : int, lowerCamelCase : int, lowerCamelCase : Union[int, Tuple[int, int]], lowerCamelCase : Union[int, Tuple[int, int], str] = 0, lowerCamelCase : bool = False, lowerCamelCase : Union[int, Tuple[int, int]] = 1, )-> None: super().__init__() lowerCamelCase__ : Tuple =nn.Convad( in_channels=lowerCamelCase, out_channels=lowerCamelCase, kernel_size=lowerCamelCase, padding=lowerCamelCase, bias=lowerCamelCase, dilation=lowerCamelCase, ) lowerCamelCase__ : str =nn.BatchNormad(lowerCamelCase ) lowerCamelCase__ : Optional[Any] =nn.ReLU() def snake_case ( self : List[Any], lowerCamelCase : torch.Tensor )-> torch.Tensor: lowerCamelCase__ : List[Any] =self.conv(lowerCamelCase ) lowerCamelCase__ : Tuple =self.batch_norm(lowerCamelCase ) lowerCamelCase__ : Optional[int] =self.activation(lowerCamelCase ) return output class __SCREAMING_SNAKE_CASE ( nn.Module ): '''simple docstring''' def __init__( self : List[Any], lowerCamelCase : int, lowerCamelCase : int, lowerCamelCase : int )-> None: super().__init__() lowerCamelCase__ : Dict =[ nn.AdaptiveAvgPoolad(lowerCamelCase ), UperNetConvModule(lowerCamelCase, lowerCamelCase, kernel_size=1 ), ] for i, layer in enumerate(self.layers ): 
self.add_module(str(lowerCamelCase ), lowerCamelCase ) def snake_case ( self : str, lowerCamelCase : torch.Tensor )-> torch.Tensor: lowerCamelCase__ : Tuple =input for layer in self.layers: lowerCamelCase__ : Any =layer(lowerCamelCase ) return hidden_state class __SCREAMING_SNAKE_CASE ( nn.Module ): '''simple docstring''' def __init__( self : Union[str, Any], lowerCamelCase : Tuple[int, ...], lowerCamelCase : int, lowerCamelCase : int, lowerCamelCase : bool )-> None: super().__init__() lowerCamelCase__ : Dict =pool_scales lowerCamelCase__ : List[str] =align_corners lowerCamelCase__ : str =in_channels lowerCamelCase__ : Optional[Any] =channels lowerCamelCase__ : Any =[] for i, pool_scale in enumerate(lowerCamelCase ): lowerCamelCase__ : Any =UperNetPyramidPoolingBlock(pool_scale=lowerCamelCase, in_channels=lowerCamelCase, channels=lowerCamelCase ) self.blocks.append(lowerCamelCase ) self.add_module(str(lowerCamelCase ), lowerCamelCase ) def snake_case ( self : Optional[int], lowerCamelCase : torch.Tensor )-> List[torch.Tensor]: lowerCamelCase__ : Tuple =[] for ppm in self.blocks: lowerCamelCase__ : Optional[Any] =ppm(lowerCamelCase ) lowerCamelCase__ : Dict =nn.functional.interpolate( lowerCamelCase, size=x.size()[2:], mode='''bilinear''', align_corners=self.align_corners ) ppm_outs.append(lowerCamelCase ) return ppm_outs class __SCREAMING_SNAKE_CASE ( nn.Module ): '''simple docstring''' def __init__( self : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Any )-> Dict: super().__init__() lowerCamelCase__ : Dict =config lowerCamelCase__ : Optional[int] =config.pool_scales # e.g. 
(1, 2, 3, 6) lowerCamelCase__ : List[str] =in_channels lowerCamelCase__ : Tuple =config.hidden_size lowerCamelCase__ : int =False lowerCamelCase__ : str =nn.Convad(self.channels, config.num_labels, kernel_size=1 ) # PSP Module lowerCamelCase__ : Tuple =UperNetPyramidPoolingModule( self.pool_scales, self.in_channels[-1], self.channels, align_corners=self.align_corners, ) lowerCamelCase__ : List[Any] =UperNetConvModule( self.in_channels[-1] + len(self.pool_scales ) * self.channels, self.channels, kernel_size=3, padding=1, ) # FPN Module lowerCamelCase__ : Optional[int] =nn.ModuleList() lowerCamelCase__ : Optional[int] =nn.ModuleList() for in_channels in self.in_channels[:-1]: # skip the top layer lowerCamelCase__ : Optional[int] =UperNetConvModule(lowerCamelCase, self.channels, kernel_size=1 ) lowerCamelCase__ : List[str] =UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1 ) self.lateral_convs.append(lowerCamelCase ) self.fpn_convs.append(lowerCamelCase ) lowerCamelCase__ : Optional[Any] =UperNetConvModule( len(self.in_channels ) * self.channels, self.channels, kernel_size=3, padding=1, ) def snake_case ( self : Union[str, Any] )-> Any: self.apply(self._init_weights ) def snake_case ( self : str, lowerCamelCase : Dict )-> Any: if isinstance(lowerCamelCase, nn.Convad ): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def snake_case ( self : List[str], lowerCamelCase : Union[str, Any] )-> Any: lowerCamelCase__ : str =inputs[-1] lowerCamelCase__ : List[Any] =[x] psp_outs.extend(self.psp_modules(lowerCamelCase ) ) lowerCamelCase__ : str =torch.cat(lowerCamelCase, dim=1 ) lowerCamelCase__ : Union[str, Any] =self.bottleneck(lowerCamelCase ) return output def snake_case ( self : Optional[int], lowerCamelCase : torch.Tensor )-> torch.Tensor: # build laterals lowerCamelCase__ : Optional[Any] =[lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in 
enumerate(self.lateral_convs )] laterals.append(self.psp_forward(lowerCamelCase ) ) # build top-down path lowerCamelCase__ : int =len(lowerCamelCase ) for i in range(used_backbone_levels - 1, 0, -1 ): lowerCamelCase__ : List[str] =laterals[i - 1].shape[2:] lowerCamelCase__ : Optional[Any] =laterals[i - 1] + nn.functional.interpolate( laterals[i], size=lowerCamelCase, mode='''bilinear''', align_corners=self.align_corners ) # build outputs lowerCamelCase__ : Any =[self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )] # append psp feature fpn_outs.append(laterals[-1] ) for i in range(used_backbone_levels - 1, 0, -1 ): lowerCamelCase__ : Optional[int] =nn.functional.interpolate( fpn_outs[i], size=fpn_outs[0].shape[2:], mode='''bilinear''', align_corners=self.align_corners ) lowerCamelCase__ : Union[str, Any] =torch.cat(lowerCamelCase, dim=1 ) lowerCamelCase__ : Optional[int] =self.fpn_bottleneck(lowerCamelCase ) lowerCamelCase__ : Optional[int] =self.classifier(lowerCamelCase ) return output class __SCREAMING_SNAKE_CASE ( nn.Module ): '''simple docstring''' def __init__( self : Dict, lowerCamelCase : Dict, lowerCamelCase : int = 2, lowerCamelCase : int = 3, lowerCamelCase : Union[int, Tuple[int, int]] = 1 )-> None: super().__init__() lowerCamelCase__ : Optional[int] =config lowerCamelCase__ : Dict =config.auxiliary_in_channels lowerCamelCase__ : Any =config.auxiliary_channels lowerCamelCase__ : int =config.auxiliary_num_convs lowerCamelCase__ : int =config.auxiliary_concat_input lowerCamelCase__ : List[str] =in_index lowerCamelCase__ : Optional[Any] =(kernel_size // 2) * dilation lowerCamelCase__ : List[Any] =[] convs.append( UperNetConvModule( self.in_channels, self.channels, kernel_size=lowerCamelCase, padding=lowerCamelCase, dilation=lowerCamelCase ) ) for i in range(self.num_convs - 1 ): convs.append( UperNetConvModule( self.channels, self.channels, kernel_size=lowerCamelCase, padding=lowerCamelCase, dilation=lowerCamelCase ) ) if 
self.num_convs == 0: lowerCamelCase__ : str =nn.Identity() else: lowerCamelCase__ : Union[str, Any] =nn.Sequential(*lowerCamelCase ) if self.concat_input: lowerCamelCase__ : Any =UperNetConvModule( self.in_channels + self.channels, self.channels, kernel_size=lowerCamelCase, padding=kernel_size // 2 ) lowerCamelCase__ : List[str] =nn.Convad(self.channels, config.num_labels, kernel_size=1 ) def snake_case ( self : Any )-> List[str]: self.apply(self._init_weights ) def snake_case ( self : Tuple, lowerCamelCase : Tuple )-> List[Any]: if isinstance(lowerCamelCase, nn.Convad ): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def snake_case ( self : List[str], lowerCamelCase : torch.Tensor )-> torch.Tensor: # just take the relevant feature maps lowerCamelCase__ : List[str] =encoder_hidden_states[self.in_index] lowerCamelCase__ : Tuple =self.convs(lowerCamelCase ) if self.concat_input: lowerCamelCase__ : Optional[Any] =self.conv_cat(torch.cat([hidden_states, output], dim=1 ) ) lowerCamelCase__ : List[str] =self.classifier(lowerCamelCase ) return output class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' _a = UperNetConfig _a = 'pixel_values' _a = True def snake_case ( self : Tuple, lowerCamelCase : str )-> Tuple: if isinstance(lowerCamelCase, lowerCamelCase ): module.backbone.init_weights() module.decode_head.init_weights() module.auxiliary_head.init_weights() def snake_case ( self : int )-> Optional[int]: self.backbone.init_weights() self.decode_head.init_weights() self.auxiliary_head.init_weights() def snake_case ( self : Any, lowerCamelCase : Optional[Any], lowerCamelCase : Optional[int]=False )-> Any: if isinstance(lowerCamelCase, lowerCamelCase ): lowerCamelCase__ : int =value _lowercase : int = r"\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. 
Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n" _lowercase : Dict = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings( 'UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.' 
, lowerCAmelCase_ , ) class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' def __init__( self : Tuple, lowerCamelCase : List[Any] )-> int: super().__init__(lowerCamelCase ) lowerCamelCase__ : Tuple =AutoBackbone.from_config(config.backbone_config ) # Semantic segmentation head(s) lowerCamelCase__ : Any =UperNetHead(lowerCamelCase, in_channels=self.backbone.channels ) lowerCamelCase__ : Union[str, Any] =UperNetFCNHead(lowerCamelCase ) if config.use_auxiliary_head else None # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) ) @replace_return_docstrings(output_type=lowerCamelCase, config_class=_CONFIG_FOR_DOC ) def snake_case ( self : Optional[Any], lowerCamelCase : Optional[torch.Tensor] = None, lowerCamelCase : Optional[bool] = None, lowerCamelCase : Optional[bool] = None, lowerCamelCase : Optional[torch.Tensor] = None, lowerCamelCase : Optional[bool] = None, )-> Union[tuple, SemanticSegmenterOutput]: lowerCamelCase__ : int =return_dict if return_dict is not None else self.config.use_return_dict lowerCamelCase__ : Any =( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowerCamelCase__ : str =output_attentions if output_attentions is not None else self.config.output_attentions lowerCamelCase__ : int =self.backbone.forward_with_filtered_kwargs( lowerCamelCase, output_hidden_states=lowerCamelCase, output_attentions=lowerCamelCase ) lowerCamelCase__ : List[Any] =outputs.feature_maps lowerCamelCase__ : Tuple =self.decode_head(lowerCamelCase ) lowerCamelCase__ : Tuple =nn.functional.interpolate(lowerCamelCase, size=pixel_values.shape[2:], mode='''bilinear''', align_corners=lowerCamelCase ) lowerCamelCase__ : int =None if self.auxiliary_head is not None: lowerCamelCase__ : int =self.auxiliary_head(lowerCamelCase ) lowerCamelCase__ : Any =nn.functional.interpolate( lowerCamelCase, 
size=pixel_values.shape[2:], mode='''bilinear''', align_corners=lowerCamelCase ) lowerCamelCase__ : str =None if labels is not None: if self.config.num_labels == 1: raise ValueError('''The number of labels should be greater than one''' ) else: # compute weighted loss lowerCamelCase__ : Union[str, Any] =CrossEntropyLoss(ignore_index=self.config.loss_ignore_index ) lowerCamelCase__ : Union[str, Any] =loss_fct(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : Dict =loss_fct(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : Dict =main_loss + self.config.auxiliary_loss_weight * auxiliary_loss if not return_dict: if output_hidden_states: lowerCamelCase__ : Optional[Any] =(logits,) + outputs[1:] else: lowerCamelCase__ : Tuple =(logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SemanticSegmenterOutput( loss=lowerCamelCase, logits=lowerCamelCase, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
625
"""simple docstring""" from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, ) @flax.struct.dataclass class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' _a = 42 _a = 42 class __SCREAMING_SNAKE_CASE ( nn.Module ): '''simple docstring''' _a = 42 _a = (1_6, 3_2, 9_6, 2_5_6) _a = jnp.floataa def snake_case ( self : Tuple )-> int: lowerCamelCase__ : Tuple =nn.Conv( self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, ) lowerCamelCase__ : Dict =[] for i in range(len(self.block_out_channels ) - 1 ): lowerCamelCase__ : Dict =self.block_out_channels[i] lowerCamelCase__ : Dict =self.block_out_channels[i + 1] lowerCamelCase__ : List[str] =nn.Conv( lowerCamelCase, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, ) blocks.append(lowerCamelCase ) lowerCamelCase__ : Optional[int] =nn.Conv( lowerCamelCase, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, ) blocks.append(lowerCamelCase ) lowerCamelCase__ : Any =blocks lowerCamelCase__ : Optional[int] =nn.Conv( self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) def __call__( self : Any, lowerCamelCase : int )-> List[str]: lowerCamelCase__ : Tuple =self.conv_in(lowerCamelCase ) lowerCamelCase__ : Dict =nn.silu(lowerCamelCase ) for block in self.blocks: lowerCamelCase__ : str =block(lowerCamelCase ) lowerCamelCase__ : List[str] =nn.silu(lowerCamelCase ) lowerCamelCase__ : Any 
=self.conv_out(lowerCamelCase ) return embedding @flax_register_to_config class __SCREAMING_SNAKE_CASE ( nn.Module , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' _a = 3_2 _a = 4 _a = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) _a = False _a = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0) _a = 2 _a = 8 _a = None _a = 1_2_8_0 _a = 0.0 _a = False _a = jnp.floataa _a = True _a = 0 _a = "rgb" _a = (1_6, 3_2, 9_6, 2_5_6) def snake_case ( self : str, lowerCamelCase : jax.random.KeyArray )-> FrozenDict: # init input tensors lowerCamelCase__ : int =(1, self.in_channels, self.sample_size, self.sample_size) lowerCamelCase__ : int =jnp.zeros(lowerCamelCase, dtype=jnp.floataa ) lowerCamelCase__ : Union[str, Any] =jnp.ones((1,), dtype=jnp.intaa ) lowerCamelCase__ : str =jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.floataa ) lowerCamelCase__ : Any =(1, 3, self.sample_size * 8, self.sample_size * 8) lowerCamelCase__ : Optional[Any] =jnp.zeros(lowerCamelCase, dtype=jnp.floataa ) lowerCamelCase__ , lowerCamelCase__ : List[Any] =jax.random.split(lowerCamelCase ) lowerCamelCase__ : Dict ={'''params''': params_rng, '''dropout''': dropout_rng} return self.init(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )["params"] def snake_case ( self : Any )-> Tuple: lowerCamelCase__ : Optional[int] =self.block_out_channels lowerCamelCase__ : Tuple =block_out_channels[0] * 4 # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. 
The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. lowerCamelCase__ : List[Any] =self.num_attention_heads or self.attention_head_dim # input lowerCamelCase__ : int =nn.Conv( block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, ) # time lowerCamelCase__ : str =FlaxTimesteps( block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift ) lowerCamelCase__ : Dict =FlaxTimestepEmbedding(lowerCamelCase, dtype=self.dtype ) lowerCamelCase__ : List[Any] =FlaxControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels, ) lowerCamelCase__ : Dict =self.only_cross_attention if isinstance(lowerCamelCase, lowerCamelCase ): lowerCamelCase__ : int =(only_cross_attention,) * len(self.down_block_types ) if isinstance(lowerCamelCase, lowerCamelCase ): lowerCamelCase__ : List[str] =(num_attention_heads,) * len(self.down_block_types ) # down lowerCamelCase__ : Union[str, Any] =[] lowerCamelCase__ : Dict =[] lowerCamelCase__ : List[Any] =block_out_channels[0] lowerCamelCase__ : List[Any] =nn.Conv( lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) controlnet_down_blocks.append(lowerCamelCase ) for i, down_block_type in enumerate(self.down_block_types ): lowerCamelCase__ : List[Any] =output_channel lowerCamelCase__ : str =block_out_channels[i] lowerCamelCase__ : Dict =i == len(lowerCamelCase ) - 1 if down_block_type == "CrossAttnDownBlock2D": lowerCamelCase__ : str =FlaxCrossAttnDownBlockaD( in_channels=lowerCamelCase, out_channels=lowerCamelCase, dropout=self.dropout, 
num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype, ) else: lowerCamelCase__ : List[Any] =FlaxDownBlockaD( in_channels=lowerCamelCase, out_channels=lowerCamelCase, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, ) down_blocks.append(lowerCamelCase ) for _ in range(self.layers_per_block ): lowerCamelCase__ : Any =nn.Conv( lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) controlnet_down_blocks.append(lowerCamelCase ) if not is_final_block: lowerCamelCase__ : Any =nn.Conv( lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) controlnet_down_blocks.append(lowerCamelCase ) lowerCamelCase__ : int =down_blocks lowerCamelCase__ : List[str] =controlnet_down_blocks # mid lowerCamelCase__ : Tuple =block_out_channels[-1] lowerCamelCase__ : List[Any] =FlaxUNetMidBlockaDCrossAttn( in_channels=lowerCamelCase, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype, ) lowerCamelCase__ : List[str] =nn.Conv( lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) def __call__( self : int, lowerCamelCase : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : str, lowerCamelCase : float = 1.0, lowerCamelCase : bool = True, lowerCamelCase : bool = False, )-> Union[FlaxControlNetOutput, Tuple]: lowerCamelCase__ : int =self.controlnet_conditioning_channel_order if channel_order == "bgr": lowerCamelCase__ : int 
=jnp.flip(lowerCamelCase, axis=1 ) # 1. time if not isinstance(lowerCamelCase, jnp.ndarray ): lowerCamelCase__ : Any =jnp.array([timesteps], dtype=jnp.intaa ) elif isinstance(lowerCamelCase, jnp.ndarray ) and len(timesteps.shape ) == 0: lowerCamelCase__ : List[str] =timesteps.astype(dtype=jnp.floataa ) lowerCamelCase__ : int =jnp.expand_dims(lowerCamelCase, 0 ) lowerCamelCase__ : Optional[Any] =self.time_proj(lowerCamelCase ) lowerCamelCase__ : Optional[Any] =self.time_embedding(lowerCamelCase ) # 2. pre-process lowerCamelCase__ : Optional[int] =jnp.transpose(lowerCamelCase, (0, 2, 3, 1) ) lowerCamelCase__ : Dict =self.conv_in(lowerCamelCase ) lowerCamelCase__ : List[str] =jnp.transpose(lowerCamelCase, (0, 2, 3, 1) ) lowerCamelCase__ : int =self.controlnet_cond_embedding(lowerCamelCase ) sample += controlnet_cond # 3. down lowerCamelCase__ : Union[str, Any] =(sample,) for down_block in self.down_blocks: if isinstance(lowerCamelCase, lowerCamelCase ): lowerCamelCase__ , lowerCamelCase__ : Dict =down_block(lowerCamelCase, lowerCamelCase, lowerCamelCase, deterministic=not train ) else: lowerCamelCase__ , lowerCamelCase__ : Tuple =down_block(lowerCamelCase, lowerCamelCase, deterministic=not train ) down_block_res_samples += res_samples # 4. mid lowerCamelCase__ : Optional[int] =self.mid_block(lowerCamelCase, lowerCamelCase, lowerCamelCase, deterministic=not train ) # 5. contronet blocks lowerCamelCase__ : Optional[Any] =() for down_block_res_sample, controlnet_block in zip(lowerCamelCase, self.controlnet_down_blocks ): lowerCamelCase__ : Union[str, Any] =controlnet_block(lowerCamelCase ) controlnet_down_block_res_samples += (down_block_res_sample,) lowerCamelCase__ : List[str] =controlnet_down_block_res_samples lowerCamelCase__ : List[str] =self.controlnet_mid_block(lowerCamelCase ) # 6. 
scaling lowerCamelCase__ : Union[str, Any] =[sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample *= conditioning_scale if not return_dict: return (down_block_res_samples, mid_block_res_sample) return FlaxControlNetOutput( down_block_res_samples=lowerCamelCase, mid_block_res_sample=lowerCamelCase )
625
1
"""simple docstring""" import os import unittest from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' _a = TransfoXLTokenizer _a = False _a = False def snake_case ( self : Tuple )-> Tuple: super().setUp() lowerCamelCase__ : Tuple =[ '''<unk>''', '''[CLS]''', '''[SEP]''', '''want''', '''unwanted''', '''wa''', '''un''', '''running''', ''',''', '''low''', '''l''', ] lowerCamelCase__ : Any =os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def snake_case ( self : List[str], **lowerCamelCase : Any )-> List[str]: lowerCamelCase__ : Tuple =True return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **lowerCamelCase ) def snake_case ( self : List[str], lowerCamelCase : Tuple )-> Optional[int]: lowerCamelCase__ : Optional[int] ='''<unk> UNwanted , running''' lowerCamelCase__ : Dict ='''<unk> unwanted, running''' return input_text, output_text def snake_case ( self : str )-> Optional[int]: lowerCamelCase__ : Optional[Any] =TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=lowerCamelCase ) lowerCamelCase__ : Any =tokenizer.tokenize('''<unk> UNwanted , running''' ) self.assertListEqual(lowerCamelCase, ['''<unk>''', '''unwanted''', ''',''', '''running'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ), [0, 4, 8, 7] ) def snake_case ( self : List[str] )-> List[str]: lowerCamelCase__ : Dict =TransfoXLTokenizer(lower_case=lowerCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? 
''' ), ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) def snake_case ( self : Dict )-> Optional[int]: lowerCamelCase__ : Optional[Any] =TransfoXLTokenizer(lower_case=lowerCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ), ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def snake_case ( self : Any )-> List[Any]: lowerCamelCase__ : Optional[Any] =TransfoXLTokenizer(lower_case=lowerCamelCase ) lowerCamelCase__ : Dict ='''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?''' lowerCamelCase__ : Dict =[ '''Hello''', '''(''', '''bracket''', ''')''', '''and''', '''side''', '''@-@''', '''scrolled''', '''[''', '''and''', ''']''', '''Henry''', '''\'s''', '''$''', '''5''', '''@,@''', '''000''', '''with''', '''3''', '''@.@''', '''34''', '''m''', '''.''', '''What''', '''\'s''', '''up''', '''!''', '''?''', ] self.assertListEqual(tokenizer.tokenize(lowerCamelCase ), lowerCamelCase ) self.assertEqual(tokenizer.convert_tokens_to_string(lowerCamelCase ), lowerCamelCase ) def snake_case ( self : Tuple )-> int: lowerCamelCase__ : List[Any] =self.get_tokenizer() lowerCamelCase__ : Optional[int] =len(lowerCamelCase ) tokenizer.add_tokens(['''new1''', '''new2'''] ) tokenizer.move_added_token('''new1''', 1 ) # Check that moved token is not copied (duplicate) self.assertEqual(len(lowerCamelCase ), original_len + 2 ) # Check that token is moved to specified id self.assertEqual(tokenizer.encode('''new1''' ), [1] ) self.assertEqual(tokenizer.decode([1] ), '''new1''' )
625
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) _lowercase : Optional[Any] = { "configuration_clip": [ "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "CLIPConfig", "CLIPOnnxConfig", "CLIPTextConfig", "CLIPVisionConfig", ], "processing_clip": ["CLIPProcessor"], "tokenization_clip": ["CLIPTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : str = ["CLIPTokenizerFast"] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Any = ["CLIPFeatureExtractor"] _lowercase : int = ["CLIPImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Optional[Any] = [ "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "CLIPModel", "CLIPPreTrainedModel", "CLIPTextModel", "CLIPTextModelWithProjection", "CLIPVisionModel", "CLIPVisionModelWithProjection", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Dict = [ "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "TFCLIPModel", "TFCLIPPreTrainedModel", "TFCLIPTextModel", "TFCLIPVisionModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Union[str, Any] = [ "FlaxCLIPModel", "FlaxCLIPPreTrainedModel", "FlaxCLIPTextModel", "FlaxCLIPTextPreTrainedModel", "FlaxCLIPVisionModel", "FlaxCLIPVisionPreTrainedModel", ] if TYPE_CHECKING: from .configuration_clip import ( CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPConfig, CLIPOnnxConfig, CLIPTextConfig, CLIPVisionConfig, ) from .processing_clip import CLIPProcessor from .tokenization_clip import CLIPTokenizer 
try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_clip_fast import CLIPTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clip import CLIPFeatureExtractor from .image_processing_clip import CLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clip import ( CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPModel, CLIPPreTrainedModel, CLIPTextModel, CLIPTextModelWithProjection, CLIPVisionModel, CLIPVisionModelWithProjection, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_clip import ( TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFCLIPModel, TFCLIPPreTrainedModel, TFCLIPTextModel, TFCLIPVisionModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_clip import ( FlaxCLIPModel, FlaxCLIPPreTrainedModel, FlaxCLIPTextModel, FlaxCLIPTextPreTrainedModel, FlaxCLIPVisionModel, FlaxCLIPVisionPreTrainedModel, ) else: import sys _lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
625
1
"""simple docstring""" # Imports import numpy as np class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : List[str], lowerCamelCase : str=None, lowerCamelCase : List[str]=None, lowerCamelCase : Any=None, lowerCamelCase : Any=None, lowerCamelCase : int=None )-> Any: self.set_matricies(red=lowerCamelCase, green=lowerCamelCase, blue=lowerCamelCase, red_edge=lowerCamelCase, nir=lowerCamelCase ) def snake_case ( self : str, lowerCamelCase : int=None, lowerCamelCase : Dict=None, lowerCamelCase : List[Any]=None, lowerCamelCase : List[Any]=None, lowerCamelCase : Dict=None )-> Optional[Any]: if red is not None: lowerCamelCase__ : List[Any] =red if green is not None: lowerCamelCase__ : Dict =green if blue is not None: lowerCamelCase__ : int =blue if red_edge is not None: lowerCamelCase__ : int =red_edge if nir is not None: lowerCamelCase__ : Any =nir return True def snake_case ( self : Dict, lowerCamelCase : Union[str, Any]="", lowerCamelCase : str=None, lowerCamelCase : str=None, lowerCamelCase : str=None, lowerCamelCase : Dict=None, lowerCamelCase : str=None )-> Optional[Any]: self.set_matricies(red=lowerCamelCase, green=lowerCamelCase, blue=lowerCamelCase, red_edge=lowerCamelCase, nir=lowerCamelCase ) lowerCamelCase__ : Optional[Any] ={ '''ARVI2''': self.arvaa, '''CCCI''': self.ccci, '''CVI''': self.cvi, '''GLI''': self.gli, '''NDVI''': self.ndvi, '''BNDVI''': self.bndvi, '''redEdgeNDVI''': self.red_edge_ndvi, '''GNDVI''': self.gndvi, '''GBNDVI''': self.gbndvi, '''GRNDVI''': self.grndvi, '''RBNDVI''': self.rbndvi, '''PNDVI''': self.pndvi, '''ATSAVI''': self.atsavi, '''BWDRVI''': self.bwdrvi, '''CIgreen''': self.ci_green, '''CIrededge''': self.ci_rededge, '''CI''': self.ci, '''CTVI''': self.ctvi, '''GDVI''': self.gdvi, '''EVI''': self.evi, '''GEMI''': self.gemi, '''GOSAVI''': self.gosavi, '''GSAVI''': self.gsavi, '''Hue''': self.hue, '''IVI''': self.ivi, '''IPVI''': self.ipvi, '''I''': self.i, '''RVI''': self.rvi, '''MRVI''': self.mrvi, '''MSAVI''': 
self.m_savi, '''NormG''': self.norm_g, '''NormNIR''': self.norm_nir, '''NormR''': self.norm_r, '''NGRDI''': self.ngrdi, '''RI''': self.ri, '''S''': self.s, '''IF''': self._if, '''DVI''': self.dvi, '''TVI''': self.tvi, '''NDRE''': self.ndre, } try: return funcs[index]() except KeyError: print('''Index not in the list!''' ) return False def snake_case ( self : Tuple )-> List[str]: return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red))) def snake_case ( self : Any )-> Union[str, Any]: return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / ( (self.nir - self.red) / (self.nir + self.red) ) def snake_case ( self : Union[str, Any] )-> int: return self.nir * (self.red / (self.green**2)) def snake_case ( self : Tuple )-> Optional[Any]: return (2 * self.green - self.red - self.blue) / ( 2 * self.green + self.red + self.blue ) def snake_case ( self : Optional[Any] )-> int: return (self.nir - self.red) / (self.nir + self.red) def snake_case ( self : List[Any] )-> int: return (self.nir - self.blue) / (self.nir + self.blue) def snake_case ( self : Dict )-> str: return (self.redEdge - self.red) / (self.redEdge + self.red) def snake_case ( self : str )-> Optional[Any]: return (self.nir - self.green) / (self.nir + self.green) def snake_case ( self : Tuple )-> int: return (self.nir - (self.green + self.blue)) / ( self.nir + (self.green + self.blue) ) def snake_case ( self : Union[str, Any] )-> Tuple: return (self.nir - (self.green + self.red)) / ( self.nir + (self.green + self.red) ) def snake_case ( self : List[Any] )-> Union[str, Any]: return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red)) def snake_case ( self : List[str] )-> Optional[int]: return (self.nir - (self.green + self.red + self.blue)) / ( self.nir + (self.green + self.red + self.blue) ) def snake_case ( self : int, lowerCamelCase : int=0.08, lowerCamelCase : int=1.22, lowerCamelCase : Any=0.03 )-> Optional[int]: return a * ( (self.nir - a * self.red - b) / (a * 
self.nir + self.red - a * b + x * (1 + a**2)) ) def snake_case ( self : Union[str, Any] )-> List[Any]: return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue) def snake_case ( self : Dict )-> str: return (self.nir / self.green) - 1 def snake_case ( self : Dict )-> Optional[int]: return (self.nir / self.redEdge) - 1 def snake_case ( self : Dict )-> List[str]: return (self.red - self.blue) / self.red def snake_case ( self : int )-> Any: lowerCamelCase__ : List[str] =self.ndvi() return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2)) def snake_case ( self : List[Any] )-> int: return self.nir - self.green def snake_case ( self : List[str] )-> int: return 2.5 * ( (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1) ) def snake_case ( self : Union[str, Any] )-> str: lowerCamelCase__ : List[Any] =(2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / ( self.nir + self.red + 0.5 ) return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red) def snake_case ( self : Any, lowerCamelCase : Optional[int]=0.16 )-> Tuple: return (self.nir - self.green) / (self.nir + self.green + y) def snake_case ( self : Any, lowerCamelCase : Tuple=0.5 )-> Union[str, Any]: return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n) def snake_case ( self : Tuple )-> List[str]: return np.arctan( ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) ) def snake_case ( self : Tuple, lowerCamelCase : Union[str, Any]=None, lowerCamelCase : List[Any]=None )-> List[Any]: return (self.nir - b) / (a * self.red) def snake_case ( self : Optional[int] )-> Tuple: return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1) def snake_case ( self : List[Any] )-> List[Any]: return (self.red + self.green + self.blue) / 30.5 def snake_case ( self : Union[str, Any] )-> List[str]: return self.nir / self.red def snake_case ( self : List[Any] )-> str: return (self.rvi() - 1) / (self.rvi() + 1) def snake_case ( 
self : Tuple )-> int: return ( (2 * self.nir + 1) - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2) ) / 2 def snake_case ( self : List[Any] )-> List[str]: return self.green / (self.nir + self.red + self.green) def snake_case ( self : List[Any] )-> List[str]: return self.nir / (self.nir + self.red + self.green) def snake_case ( self : List[str] )-> Union[str, Any]: return self.red / (self.nir + self.red + self.green) def snake_case ( self : Optional[int] )-> Tuple: return (self.green - self.red) / (self.green + self.red) def snake_case ( self : str )-> Optional[Any]: return (self.red - self.green) / (self.red + self.green) def snake_case ( self : List[Any] )-> Optional[Any]: lowerCamelCase__ : Optional[Any] =np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] ) lowerCamelCase__ : List[str] =np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] ) return (max_value - min_value) / max_value def snake_case ( self : Optional[Any] )-> Tuple: return (2 * self.red - self.green - self.blue) / (self.green - self.blue) def snake_case ( self : Any )-> Dict: return self.nir / self.red def snake_case ( self : List[str] )-> Tuple: return (self.ndvi() + 0.5) ** (1 / 2) def snake_case ( self : Optional[int] )-> Dict: return (self.nir - self.redEdge) / (self.nir + self.redEdge)
625
"""simple docstring""" import os def snake_case__ ( ): """simple docstring""" with open(os.path.dirname(__lowerCamelCase ) + '''/p022_names.txt''' ) as file: lowerCamelCase__ : Tuple =str(file.readlines()[0] ) lowerCamelCase__ : int =names.replace('''"''' , '''''' ).split(''',''' ) names.sort() lowerCamelCase__ : Union[str, Any] =0 lowerCamelCase__ : str =0 for i, name in enumerate(__lowerCamelCase ): for letter in name: name_score += ord(__lowerCamelCase ) - 64 total_score += (i + 1) * name_score lowerCamelCase__ : Dict =0 return total_score if __name__ == "__main__": print(solution())
625
1
"""simple docstring""" import re import time from typing import Optional import IPython.display as disp from ..trainer_callback import TrainerCallback from ..trainer_utils import IntervalStrategy, has_length def snake_case__ ( __lowerCamelCase : Any ): """simple docstring""" lowerCamelCase__ : Tuple =int(__lowerCamelCase ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict =t // 3600, (t // 60) % 60, t % 60 return f'''{h}:{m:02d}:{s:02d}''' if h != 0 else f'''{m:02d}:{s:02d}''' def snake_case__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Tuple , __lowerCamelCase : Dict=300 ): """simple docstring""" # docstyle-ignore return f''' <div> {prefix} <progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress> {label} </div> ''' def snake_case__ ( __lowerCamelCase : Tuple ): """simple docstring""" lowerCamelCase__ : List[str] ='''<table border="1" class="dataframe">\n''' html_code += """ <thead>\n <tr style="text-align: left;">\n""" for i in items[0]: html_code += f''' <th>{i}</th>\n''' html_code += " </tr>\n </thead>\n <tbody>\n" for line in items[1:]: html_code += " <tr>\n" for elt in line: lowerCamelCase__ : Any =f'''{elt:.6f}''' if isinstance(__lowerCamelCase , __lowerCamelCase ) else str(__lowerCamelCase ) html_code += f''' <td>{elt}</td>\n''' html_code += " </tr>\n" html_code += " </tbody>\n</table><p>" return html_code class __SCREAMING_SNAKE_CASE : '''simple docstring''' _a = 5 _a = 0.2 def __init__( self : Any, lowerCamelCase : int, lowerCamelCase : Optional[str] = None, lowerCamelCase : bool = True, lowerCamelCase : Optional["NotebookTrainingTracker"] = None, lowerCamelCase : int = 300, )-> Tuple: lowerCamelCase__ : List[Any] =total lowerCamelCase__ : int ='''''' if prefix is None else prefix lowerCamelCase__ : Dict =leave lowerCamelCase__ : Any =parent lowerCamelCase__ : Dict =width lowerCamelCase__ : str =None 
lowerCamelCase__ : str =None lowerCamelCase__ : Tuple =None def snake_case ( self : Optional[int], lowerCamelCase : int, lowerCamelCase : bool = False, lowerCamelCase : str = None )-> List[str]: lowerCamelCase__ : Dict =value if comment is not None: lowerCamelCase__ : Optional[int] =comment if self.last_value is None: lowerCamelCase__ : str =time.time() lowerCamelCase__ : Optional[Any] =value lowerCamelCase__ : List[Any] =None lowerCamelCase__ : Dict =self.warmup lowerCamelCase__ : Tuple =1 self.update_bar(lowerCamelCase ) elif value <= self.last_value and not force_update: return elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total ): if self.first_calls > 0: self.first_calls -= 1 lowerCamelCase__ : Optional[Any] =time.time() lowerCamelCase__ : Optional[Any] =current_time - self.start_time # We could have value = self.start_value if the update is called twixe with the same start value. if value > self.start_value: lowerCamelCase__ : Optional[Any] =self.elapsed_time / (value - self.start_value) else: lowerCamelCase__ : Tuple =None if value >= self.total: lowerCamelCase__ : Dict =self.total lowerCamelCase__ : Tuple =None if not self.leave: self.close() elif self.average_time_per_item is not None: lowerCamelCase__ : List[Any] =self.average_time_per_item * (self.total - value) self.update_bar(lowerCamelCase ) lowerCamelCase__ : Any =value lowerCamelCase__ : List[str] =current_time if self.average_time_per_item is None: lowerCamelCase__ : str =1 else: lowerCamelCase__ : str =max(int(self.update_every / self.average_time_per_item ), 1 ) def snake_case ( self : List[str], lowerCamelCase : int, lowerCamelCase : Dict=None )-> Optional[int]: lowerCamelCase__ : int =''' ''' * (len(str(self.total ) ) - len(str(lowerCamelCase ) )) + str(lowerCamelCase ) if self.elapsed_time is None: lowerCamelCase__ : Optional[int] =F'''[{spaced_value}/{self.total} : < :''' elif self.predicted_remaining is None: lowerCamelCase__ : Optional[Any] 
=F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}''' else: lowerCamelCase__ : Dict =( F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <''' F''' {format_time(self.predicted_remaining )}''' ) self.label += F''', {1/self.average_time_per_item:.2f} it/s''' self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]''' self.display() def snake_case ( self : List[Any] )-> str: lowerCamelCase__ : Optional[int] =html_progress_bar(self.value, self.total, self.prefix, self.label, self.width ) if self.parent is not None: # If this is a child bar, the parent will take care of the display. self.parent.display() return if self.output is None: lowerCamelCase__ : Tuple =disp.display(disp.HTML(self.html_code ), display_id=lowerCamelCase ) else: self.output.update(disp.HTML(self.html_code ) ) def snake_case ( self : Union[str, Any] )-> Any: if self.parent is None and self.output is not None: self.output.update(disp.HTML('''''' ) ) class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' def __init__( self : Any, lowerCamelCase : Optional[int], lowerCamelCase : Union[str, Any]=None )-> str: super().__init__(lowerCamelCase ) lowerCamelCase__ : List[Any] =None if column_names is None else [column_names] lowerCamelCase__ : Tuple =None def snake_case ( self : int )-> int: lowerCamelCase__ : Union[str, Any] =html_progress_bar(self.value, self.total, self.prefix, self.label, self.width ) if self.inner_table is not None: self.html_code += text_to_html_table(self.inner_table ) if self.child_bar is not None: self.html_code += self.child_bar.html_code if self.output is None: lowerCamelCase__ : Union[str, Any] =disp.display(disp.HTML(self.html_code ), display_id=lowerCamelCase ) else: self.output.update(disp.HTML(self.html_code ) ) def snake_case ( self : List[Any], lowerCamelCase : Any )-> Optional[int]: if self.inner_table is None: lowerCamelCase__ : List[str] =[list(values.keys() ), list(values.values() 
)] else: lowerCamelCase__ : Tuple =self.inner_table[0] if len(self.inner_table ) == 1: # We give a chance to update the column names at the first iteration for key in values.keys(): if key not in columns: columns.append(lowerCamelCase ) lowerCamelCase__ : List[str] =columns self.inner_table.append([values[c] for c in columns] ) def snake_case ( self : str, lowerCamelCase : Optional[Any], lowerCamelCase : Optional[int]=None, lowerCamelCase : Tuple=300 )-> List[Any]: lowerCamelCase__ : Optional[Any] =NotebookProgressBar(lowerCamelCase, prefix=lowerCamelCase, parent=self, width=lowerCamelCase ) return self.child_bar def snake_case ( self : Union[str, Any] )-> List[str]: lowerCamelCase__ : List[str] =None self.display() class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' def __init__( self : Optional[int] )-> Dict: lowerCamelCase__ : Optional[int] =None lowerCamelCase__ : List[Any] =None lowerCamelCase__ : Dict =False def snake_case ( self : int, lowerCamelCase : Optional[int], lowerCamelCase : int, lowerCamelCase : Tuple, **lowerCamelCase : List[Any] )-> List[str]: lowerCamelCase__ : Optional[int] ='''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step''' lowerCamelCase__ : Union[str, Any] =0 lowerCamelCase__ : Tuple =0 lowerCamelCase__ : List[Any] =[self.first_column] + ['''Training Loss'''] if args.evaluation_strategy != IntervalStrategy.NO: column_names.append('''Validation Loss''' ) lowerCamelCase__ : List[str] =NotebookTrainingTracker(state.max_steps, lowerCamelCase ) def snake_case ( self : str, lowerCamelCase : Any, lowerCamelCase : Dict, lowerCamelCase : Optional[Any], **lowerCamelCase : Union[str, Any] )-> List[str]: lowerCamelCase__ : Optional[int] =int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}''' self.training_tracker.update( state.global_step + 1, comment=F'''Epoch {epoch}/{state.num_train_epochs}''', force_update=self._force_next_update, ) lowerCamelCase__ : Optional[Any] 
=False def snake_case ( self : Any, lowerCamelCase : Dict, lowerCamelCase : Dict, lowerCamelCase : int, lowerCamelCase : Union[str, Any]=None, **lowerCamelCase : List[Any] )-> Optional[int]: if not has_length(lowerCamelCase ): return if self.prediction_bar is None: if self.training_tracker is not None: lowerCamelCase__ : Any =self.training_tracker.add_child(len(lowerCamelCase ) ) else: lowerCamelCase__ : str =NotebookProgressBar(len(lowerCamelCase ) ) self.prediction_bar.update(1 ) else: self.prediction_bar.update(self.prediction_bar.value + 1 ) def snake_case ( self : List[Any], lowerCamelCase : Any, lowerCamelCase : Any, lowerCamelCase : str, **lowerCamelCase : List[Any] )-> Union[str, Any]: if self.prediction_bar is not None: self.prediction_bar.close() lowerCamelCase__ : List[Any] =None def snake_case ( self : List[Any], lowerCamelCase : Optional[Any], lowerCamelCase : Dict, lowerCamelCase : Optional[int], lowerCamelCase : List[Any]=None, **lowerCamelCase : Dict )-> List[Any]: # Only for when there is no evaluation if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs: lowerCamelCase__ : Dict ={'''Training Loss''': logs['''loss''']} # First column is necessarily Step sine we're not in epoch eval strategy lowerCamelCase__ : Tuple =state.global_step self.training_tracker.write_line(lowerCamelCase ) def snake_case ( self : Dict, lowerCamelCase : int, lowerCamelCase : Optional[Any], lowerCamelCase : Union[str, Any], lowerCamelCase : Union[str, Any]=None, **lowerCamelCase : Optional[int] )-> Optional[int]: if self.training_tracker is not None: lowerCamelCase__ : str ={'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''} for log in reversed(state.log_history ): if "loss" in log: lowerCamelCase__ : Any =log['''loss'''] break if self.first_column == "Epoch": lowerCamelCase__ : str =int(state.epoch ) else: lowerCamelCase__ : List[Any] =state.global_step lowerCamelCase__ : List[Any] ='''eval''' for k in metrics: if 
k.endswith('''_loss''' ): lowerCamelCase__ : str =re.sub(r'''\_loss$''', '''''', lowerCamelCase ) lowerCamelCase__ : Dict =metrics.pop('''total_flos''', lowerCamelCase ) lowerCamelCase__ : Union[str, Any] =metrics.pop('''epoch''', lowerCamelCase ) lowerCamelCase__ : str =metrics.pop(F'''{metric_key_prefix}_runtime''', lowerCamelCase ) lowerCamelCase__ : Tuple =metrics.pop(F'''{metric_key_prefix}_samples_per_second''', lowerCamelCase ) lowerCamelCase__ : Optional[Any] =metrics.pop(F'''{metric_key_prefix}_steps_per_second''', lowerCamelCase ) lowerCamelCase__ : Any =metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''', lowerCamelCase ) for k, v in metrics.items(): if k == F'''{metric_key_prefix}_loss''': lowerCamelCase__ : Any =v else: lowerCamelCase__ : List[Any] =k.split('''_''' ) lowerCamelCase__ : Tuple =''' '''.join([part.capitalize() for part in splits[1:]] ) lowerCamelCase__ : List[str] =v self.training_tracker.write_line(lowerCamelCase ) self.training_tracker.remove_child() lowerCamelCase__ : List[Any] =None # Evaluation takes a long time so we should force the next update. lowerCamelCase__ : Union[str, Any] =True def snake_case ( self : List[str], lowerCamelCase : List[str], lowerCamelCase : Optional[int], lowerCamelCase : str, **lowerCamelCase : str )-> Union[str, Any]: self.training_tracker.update( state.global_step, comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''', force_update=lowerCamelCase ) lowerCamelCase__ : int =None
625
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : str, lowerCamelCase : int )-> None: lowerCamelCase__ : str =value lowerCamelCase__ : Node | None =None lowerCamelCase__ : Node | None =None class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : int, lowerCamelCase : Node )-> None: lowerCamelCase__ : Any =tree def snake_case ( self : str, lowerCamelCase : Node | None )-> int: if node is None: return 0 return node.value + ( self.depth_first_search(node.left ) + self.depth_first_search(node.right ) ) def __iter__( self : Dict )-> Iterator[int]: yield self.depth_first_search(self.tree ) if __name__ == "__main__": import doctest doctest.testmod()
625
1
"""simple docstring""" from dataclasses import dataclass from typing import Tuple import numpy as np import torch @dataclass class __SCREAMING_SNAKE_CASE : '''simple docstring''' _a = 42 # [batch_size x 3] _a = 42 # [batch_size x 3] _a = 42 # [batch_size x 3] _a = 42 # [batch_size x 3] _a = 42 _a = 42 _a = 42 _a = 42 _a = 42 def snake_case ( self : Tuple )-> List[Any]: assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0] assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3 assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2 def snake_case ( self : int )-> List[str]: return torch.from_numpy(np.array([self.width, self.height], dtype=np.floataa ) ) def snake_case ( self : Tuple )-> Optional[Any]: return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.floataa ) ) def snake_case ( self : Any )-> torch.Tensor: lowerCamelCase__ : Any =torch.arange(self.height * self.width ) lowerCamelCase__ : Optional[Any] =torch.stack( [ pixel_indices % self.width, torch.div(lowerCamelCase, self.width, rounding_mode='''trunc''' ), ], axis=1, ) return coords @property def snake_case ( self : Tuple )-> Any: lowerCamelCase__ , *lowerCamelCase__ : Tuple =self.shape lowerCamelCase__ : Union[str, Any] =int(np.prod(lowerCamelCase ) ) lowerCamelCase__ : str =self.get_image_coords() lowerCamelCase__ : Optional[int] =torch.broadcast_to(coords.unsqueeze(0 ), [batch_size * inner_batch_size, *coords.shape] ) lowerCamelCase__ : Dict =self.get_camera_rays(lowerCamelCase ) lowerCamelCase__ : Dict =rays.view(lowerCamelCase, inner_batch_size * self.height * self.width, 2, 3 ) return rays def snake_case ( self : Any, lowerCamelCase : torch.Tensor )-> torch.Tensor: lowerCamelCase__ , *lowerCamelCase__ , lowerCamelCase__ : List[Any] =coords.shape assert n_coords == 2 assert batch_size == self.origin.shape[0] lowerCamelCase__ : Optional[int] =coords.view(lowerCamelCase, -1, 2 ) 
lowerCamelCase__ : int =self.resolution() lowerCamelCase__ : Dict =self.fov() lowerCamelCase__ : Union[str, Any] =(flat.float() / (res - 1)) * 2 - 1 lowerCamelCase__ : int =fracs * torch.tan(fov / 2 ) lowerCamelCase__ : Dict =fracs.view(lowerCamelCase, -1, 2 ) lowerCamelCase__ : List[str] =( self.z.view(lowerCamelCase, 1, 3 ) + self.x.view(lowerCamelCase, 1, 3 ) * fracs[:, :, :1] + self.y.view(lowerCamelCase, 1, 3 ) * fracs[:, :, 1:] ) lowerCamelCase__ : Optional[Any] =directions / directions.norm(dim=-1, keepdim=lowerCamelCase ) lowerCamelCase__ : Optional[Any] =torch.stack( [ torch.broadcast_to(self.origin.view(lowerCamelCase, 1, 3 ), [batch_size, directions.shape[1], 3] ), directions, ], dim=2, ) return rays.view(lowerCamelCase, *lowerCamelCase, 2, 3 ) def snake_case ( self : Optional[Any], lowerCamelCase : int, lowerCamelCase : int )-> "DifferentiableProjectiveCamera": assert width * self.height == height * self.width, "The aspect ratio should not change." return DifferentiableProjectiveCamera( origin=self.origin, x=self.x, y=self.y, z=self.z, width=lowerCamelCase, height=lowerCamelCase, x_fov=self.x_fov, y_fov=self.y_fov, ) def snake_case__ ( __lowerCamelCase : int ): """simple docstring""" lowerCamelCase__ : List[Any] =[] lowerCamelCase__ : Optional[int] =[] lowerCamelCase__ : int =[] lowerCamelCase__ : List[Any] =[] for theta in np.linspace(0 , 2 * np.pi , num=20 ): lowerCamelCase__ : Optional[int] =np.array([np.sin(__lowerCamelCase ), np.cos(__lowerCamelCase ), -0.5] ) z /= np.sqrt(np.sum(z**2 ) ) lowerCamelCase__ : Union[str, Any] =-z * 4 lowerCamelCase__ : Dict =np.array([np.cos(__lowerCamelCase ), -np.sin(__lowerCamelCase ), 0.0] ) lowerCamelCase__ : str =np.cross(__lowerCamelCase , __lowerCamelCase ) origins.append(__lowerCamelCase ) xs.append(__lowerCamelCase ) ys.append(__lowerCamelCase ) zs.append(__lowerCamelCase ) return DifferentiableProjectiveCamera( origin=torch.from_numpy(np.stack(__lowerCamelCase , axis=0 ) ).float() , 
x=torch.from_numpy(np.stack(__lowerCamelCase , axis=0 ) ).float() , y=torch.from_numpy(np.stack(__lowerCamelCase , axis=0 ) ).float() , z=torch.from_numpy(np.stack(__lowerCamelCase , axis=0 ) ).float() , width=__lowerCamelCase , height=__lowerCamelCase , x_fov=0.7 , y_fov=0.7 , shape=(1, len(__lowerCamelCase )) , )
625
"""simple docstring""" import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel _lowercase : List[str] = logging.getLogger(__name__) def snake_case__ ( __lowerCamelCase : Any , __lowerCamelCase : str ): """simple docstring""" # save results if os.path.exists(__lowerCamelCase ): if os.path.exists(os.path.join(__lowerCamelCase , '''config.json''' ) ) and os.path.isfile( os.path.join(__lowerCamelCase , '''config.json''' ) ): os.remove(os.path.join(__lowerCamelCase , '''config.json''' ) ) if os.path.exists(os.path.join(__lowerCamelCase , '''pytorch_model.bin''' ) ) and os.path.isfile( os.path.join(__lowerCamelCase , '''pytorch_model.bin''' ) ): os.remove(os.path.join(__lowerCamelCase , '''pytorch_model.bin''' ) ) else: os.makedirs(__lowerCamelCase ) model.save_pretrained(__lowerCamelCase ) def snake_case__ ( __lowerCamelCase : Tuple , __lowerCamelCase : Dict=False ): """simple docstring""" lowerCamelCase__ : Union[str, Any] =2 if unlogit: lowerCamelCase__ : Any =torch.pow(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : List[str] =p * torch.log(__lowerCamelCase ) lowerCamelCase__ : Tuple =0 return -plogp.sum(dim=-1 ) def snake_case__ ( __lowerCamelCase : Any ): """simple docstring""" logger.info('''lv, h >\t''' + '''\t'''.join(f'''{x + 1}''' for x in range(len(__lowerCamelCase ) ) ) ) for row in range(len(__lowerCamelCase ) ): if tensor.dtype != torch.long: logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) ) else: logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:d}''' for x in tensor[row].cpu().data ) ) def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : 
List[str]=None , __lowerCamelCase : Tuple=False ): """simple docstring""" lowerCamelCase__ , lowerCamelCase__ : Tuple =model.config.num_hidden_layers, model.config.num_attention_heads lowerCamelCase__ : Optional[Any] =torch.zeros(__lowerCamelCase , __lowerCamelCase ).to(args.device ) lowerCamelCase__ : Optional[Any] =torch.zeros(__lowerCamelCase , __lowerCamelCase ).to(args.device ) if head_mask is None: lowerCamelCase__ : List[Any] =torch.ones(__lowerCamelCase , __lowerCamelCase ).to(args.device ) head_mask.requires_grad_(requires_grad=__lowerCamelCase ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: lowerCamelCase__ : Union[str, Any] =None lowerCamelCase__ : List[str] =0.0 lowerCamelCase__ : Union[str, Any] =0.0 for step, inputs in enumerate(tqdm(__lowerCamelCase , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ): lowerCamelCase__ : Any =tuple(t.to(args.device ) for t in inputs ) ((lowerCamelCase__) , ) : Any =inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) lowerCamelCase__ : Dict =model(__lowerCamelCase , labels=__lowerCamelCase , head_mask=__lowerCamelCase ) # (loss), lm_logits, presents, (all hidden_states), (attentions) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple =( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(__lowerCamelCase ): lowerCamelCase__ : Any =entropy(attn.detach() , __lowerCamelCase ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(__lowerCamelCase ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise 
importance normalization if not args.dont_normalize_importance_by_layer: lowerCamelCase__ : int =2 lowerCamelCase__ : List[str] =torch.pow(torch.pow(__lowerCamelCase , __lowerCamelCase ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-2_0 if not args.dont_normalize_global_importance: lowerCamelCase__ : int =(head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info('''Attention entropies''' ) print_ad_tensor(__lowerCamelCase ) if compute_importance: logger.info('''Head importance scores''' ) print_ad_tensor(__lowerCamelCase ) logger.info('''Head ranked by importance scores''' ) lowerCamelCase__ : Optional[int] =torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) lowerCamelCase__ : Dict =torch.arange( head_importance.numel() , device=args.device ) lowerCamelCase__ : Any =head_ranks.view_as(__lowerCamelCase ) print_ad_tensor(__lowerCamelCase ) return attn_entropy, head_importance, total_loss def snake_case__ ( __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : int ): """simple docstring""" lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict =compute_heads_importance(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase ) lowerCamelCase__ : int =1 / loss # instead of downsteam score use the LM loss logger.info('''Pruning: original score: %f, threshold: %f''' , __lowerCamelCase , original_score * args.masking_threshold ) lowerCamelCase__ : Dict =torch.ones_like(__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] =max(1 , int(new_head_mask.numel() * args.masking_amount ) ) lowerCamelCase__ : List[Any] =original_score while current_score >= original_score * args.masking_threshold: lowerCamelCase__ : List[Any] =new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads lowerCamelCase__ : int 
=float('''Inf''' ) lowerCamelCase__ : Union[str, Any] =head_importance.view(-1 ).sort()[1] if len(__lowerCamelCase ) <= num_to_mask: print('''BREAK BY num_to_mask''' ) break # mask heads lowerCamelCase__ : List[str] =current_heads_to_mask[:num_to_mask] logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) ) lowerCamelCase__ : Optional[int] =new_head_mask.view(-1 ) lowerCamelCase__ : Optional[Any] =0.0 lowerCamelCase__ : Dict =new_head_mask.view_as(__lowerCamelCase ) lowerCamelCase__ : Tuple =new_head_mask.clone().detach() print_ad_tensor(__lowerCamelCase ) # Compute metric and head importance again lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =compute_heads_importance( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase , head_mask=__lowerCamelCase ) lowerCamelCase__ : Any =1 / loss logger.info( '''Masking: current score: %f, remaining heads %d (%.1f percents)''' , __lowerCamelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , ) logger.info('''Final head mask''' ) print_ad_tensor(__lowerCamelCase ) np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() ) return head_mask def snake_case__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[Any] ): """simple docstring""" lowerCamelCase__ : str =datetime.now() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] =compute_heads_importance( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase , compute_importance=__lowerCamelCase , head_mask=__lowerCamelCase ) lowerCamelCase__ : Tuple =1 / loss lowerCamelCase__ : Optional[Any] =datetime.now() - before_time lowerCamelCase__ : int =sum(p.numel() for p in model.parameters() ) lowerCamelCase__ : Any ={ layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in 
range(len(__lowerCamelCase ) ) } for k, v in heads_to_prune.items(): if isinstance(__lowerCamelCase , __lowerCamelCase ): lowerCamelCase__ : Optional[int] =[ v, ] assert sum(len(__lowerCamelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(__lowerCamelCase ) lowerCamelCase__ : List[str] =sum(p.numel() for p in model.parameters() ) lowerCamelCase__ : Any =datetime.now() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict =compute_heads_importance( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase , compute_importance=__lowerCamelCase , head_mask=__lowerCamelCase , actually_pruned=__lowerCamelCase , ) lowerCamelCase__ : str =1 / loss lowerCamelCase__ : Union[str, Any] =datetime.now() - before_time logger.info( '''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , __lowerCamelCase , __lowerCamelCase , pruned_num_params / original_num_params * 100 , ) logger.info('''Pruning: score with masking: %f score with pruning: %f''' , __lowerCamelCase , __lowerCamelCase ) logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 100 ) save_model(__lowerCamelCase , args.output_dir ) def snake_case__ ( ): """simple docstring""" lowerCamelCase__ : Optional[int] =argparse.ArgumentParser() # Required parameters parser.add_argument( '''--data_dir''' , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help='''The input data dir. 
Should contain the .tsv files (or other data files) for the task.''' , ) parser.add_argument( '''--model_name_or_path''' , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--output_dir''' , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help='''The output directory where the model predictions and checkpoints will be written.''' , ) # Other parameters parser.add_argument( '''--config_name''' , default='''''' , type=__lowerCamelCase , help='''Pretrained config name or path if not the same as model_name_or_path''' , ) parser.add_argument( '''--tokenizer_name''' , default='''''' , type=__lowerCamelCase , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , ) parser.add_argument( '''--cache_dir''' , default=__lowerCamelCase , type=__lowerCamelCase , help='''Where do you want to store the pre-trained models downloaded from s3''' , ) parser.add_argument( '''--data_subset''' , type=__lowerCamelCase , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' ) parser.add_argument( '''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' ) parser.add_argument( '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' ) parser.add_argument( '''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' ) parser.add_argument( '''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , ) parser.add_argument( '''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' ) parser.add_argument( '''--masking_threshold''' , default=0.9 , 
type=__lowerCamelCase , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , ) parser.add_argument( '''--masking_amount''' , default=0.1 , type=__lowerCamelCase , help='''Amount to heads to masking at each masking step.''' ) parser.add_argument('''--metric_name''' , default='''acc''' , type=__lowerCamelCase , help='''Metric to use for head masking.''' ) parser.add_argument( '''--max_seq_length''' , default=128 , type=__lowerCamelCase , help=( '''The maximum total input sequence length after WordPiece tokenization. \n''' '''Sequences longer than this will be truncated, sequences shorter padded.''' ) , ) parser.add_argument('''--batch_size''' , default=1 , type=__lowerCamelCase , help='''Batch size.''' ) parser.add_argument('''--seed''' , type=__lowerCamelCase , default=42 ) parser.add_argument('''--local_rank''' , type=__lowerCamelCase , default=-1 , help='''local_rank for distributed training on gpus''' ) parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' ) parser.add_argument('''--server_ip''' , type=__lowerCamelCase , default='''''' , help='''Can be used for distant debugging.''' ) parser.add_argument('''--server_port''' , type=__lowerCamelCase , default='''''' , help='''Can be used for distant debugging.''' ) lowerCamelCase__ : List[Any] =parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('''Waiting for debugger attach''' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowerCamelCase ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: lowerCamelCase__ : Dict =torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' ) lowerCamelCase__ : Dict =0 if args.no_cuda else 
torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) lowerCamelCase__ : str =torch.device('''cuda''' , args.local_rank ) lowerCamelCase__ : Any =1 torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) lowerCamelCase__ : Union[str, Any] =GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: lowerCamelCase__ : List[Any] =nn.parallel.DistributedDataParallel( __lowerCamelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__lowerCamelCase ) elif args.n_gpu > 1: lowerCamelCase__ : int =nn.DataParallel(__lowerCamelCase ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=__lowerCamelCase ) torch.save(__lowerCamelCase , os.path.join(args.output_dir , '''run_args.bin''' ) ) logger.info('''Training/evaluation parameters %s''' , __lowerCamelCase ) # Prepare dataset lowerCamelCase__ : Union[str, Any] =np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) lowerCamelCase__ : Any =(torch.from_numpy(__lowerCamelCase ),) lowerCamelCase__ : List[Any] =TensorDataset(*__lowerCamelCase ) lowerCamelCase__ : List[str] =RandomSampler(__lowerCamelCase ) lowerCamelCase__ : Dict =DataLoader(__lowerCamelCase , sampler=__lowerCamelCase , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: lowerCamelCase__ : 
Optional[int] =mask_heads(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) prune_heads(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if __name__ == "__main__": main()
625
1
"""simple docstring""" class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Any, lowerCamelCase : int )-> List[str]: lowerCamelCase__ : Any =n lowerCamelCase__ : Tuple =[None] * self.n lowerCamelCase__ : Tuple =0 # index of the first element lowerCamelCase__ : Union[str, Any] =0 lowerCamelCase__ : Union[str, Any] =0 def __len__( self : Optional[Any] )-> int: return self.size def snake_case ( self : str )-> bool: return self.size == 0 def snake_case ( self : str )-> str: return False if self.is_empty() else self.array[self.front] def snake_case ( self : List[str], lowerCamelCase : Optional[Any] )-> Optional[Any]: if self.size >= self.n: raise Exception('''QUEUE IS FULL''' ) lowerCamelCase__ : Any =data lowerCamelCase__ : int =(self.rear + 1) % self.n self.size += 1 return self def snake_case ( self : List[Any] )-> Dict: if self.size == 0: raise Exception('''UNDERFLOW''' ) lowerCamelCase__ : Dict =self.array[self.front] lowerCamelCase__ : Optional[int] =None lowerCamelCase__ : List[str] =(self.front + 1) % self.n self.size -= 1 return temp
625
# ---------------------------------------------------------------------------
# T5X -> Flax checkpoint converter for T5 / LongT5.
# Loads a T5X checkpoint via `checkpoints.load_tax_checkpoint`, walks every
# encoder and decoder layer copying attention / MLP / layer-norm kernels
# (plus relative-position embeddings and token embeddings) into a freshly
# initialised FlaxAutoModelForSeqaSeqLM, then saves it with
# `flax_model.save_pretrained`.
#
# NOTE(review): this block appears machine-obfuscated -- assignment targets
# were rewritten to the throwaway name `lowerCamelCase__`, so names that are
# later READ (`config`, `flax_model`, `tax_model`, `layer_name`, every
# `tax_*` tensor, `split_mlp_wi`) are never visibly bound, and the function
# signature repeats the parameter name `__lowerCamelCase` three times (a
# SyntaxError in real Python).  Restore from the upstream
# `convert_long_t5x_checkpoint_to_flax.py` before running; do not trust the
# bindings below as-is.
# NOTE(review): the branch `if config.model_type == "longt5" ...` should
# presumably be `elif` -- as written, a plain `model_type == "t5"` config
# falls through to the `else: raise ValueError(...)` even though the error
# message says 't5' is accepted.  Confirm against upstream.
# NOTE(review): the decoder section reads `txa_mlp_layer_norm`,
# `txa_decoder_norm` and `txa_token_embeddings` ("txa" vs the "tax" spelling
# used everywhere else) -- verify which spelling the real bindings use.
# NOTE(review): the `if __name__` guard calls
# `convert_tax_checkpoint_to_flax`, but the only function defined here is
# `snake_case__` -- another obfuscation casualty to reconcile.
# ---------------------------------------------------------------------------
"""simple docstring""" import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : Tuple ): """simple docstring""" lowerCamelCase__ : Union[str, Any] =AutoConfig.from_pretrained(__lowerCamelCase ) lowerCamelCase__ : Any =FlaxAutoModelForSeqaSeqLM.from_config(config=__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] =checkpoints.load_tax_checkpoint(__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] ='''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp'''] if config.model_type == "t5": lowerCamelCase__ : List[str] ='''SelfAttention''' if config.model_type == "longt5" and config.encoder_attention_type == "local": lowerCamelCase__ : List[Any] ='''LocalSelfAttention''' elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowerCamelCase__ : Optional[Any] ='''TransientGlobalSelfAttention''' else: raise ValueError( '''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`''' ''' attribute with a value from [\'local\', \'transient-global].''' ) # Encoder for layer_index in range(config.num_layers ): lowerCamelCase__ : List[Any] =f'''layers_{str(__lowerCamelCase )}''' # Self-Attention lowerCamelCase__ : List[str] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel'''] lowerCamelCase__ : Optional[int] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel'''] lowerCamelCase__ : List[str] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel'''] lowerCamelCase__ : List[Any] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel'''] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowerCamelCase__ : str
=tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale'''] # Layer Normalization lowerCamelCase__ : List[Any] =tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale'''] if split_mlp_wi: lowerCamelCase__ : Optional[Any] =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel'''] lowerCamelCase__ : Dict =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel'''] else: lowerCamelCase__ : List[str] =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel'''] lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel'''] # Layer Normalization lowerCamelCase__ : Tuple =tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale'''] # Assigning lowerCamelCase__ : str =flax_model.params['''encoder''']['''block'''][str(__lowerCamelCase )]['''layer'''] lowerCamelCase__ : int =tax_attention_key lowerCamelCase__ : Optional[int] =tax_attention_out lowerCamelCase__ : List[Any] =tax_attention_query lowerCamelCase__ : Optional[Any] =tax_attention_value lowerCamelCase__ : List[str] =tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowerCamelCase__ : Optional[int] =tax_global_layer_norm if split_mlp_wi: lowerCamelCase__ : Optional[int] =tax_mlp_wi_a lowerCamelCase__ : Optional[int] =tax_mlp_wi_a else: lowerCamelCase__ : Union[str, Any] =tax_mlp_wi lowerCamelCase__ : str =tax_mlp_wo lowerCamelCase__ : Optional[Any] =tax_mlp_layer_norm lowerCamelCase__ : Optional[int] =flax_model_encoder_layer_block # Only for layer 0: lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T lowerCamelCase__ : str =tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if
config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowerCamelCase__ : Optional[int] =tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T lowerCamelCase__ : Optional[int] =tax_encoder_global_rel_embedding # Assigning lowerCamelCase__ : int =tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale'''] lowerCamelCase__ : List[Any] =tax_encoder_norm # Decoder for layer_index in range(config.num_layers ): lowerCamelCase__ : Dict =f'''layers_{str(__lowerCamelCase )}''' # Self-Attention lowerCamelCase__ : Dict =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel'''] lowerCamelCase__ : List[Any] =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel'''] lowerCamelCase__ : Optional[int] =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel'''] lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel'''] # Layer Normalization lowerCamelCase__ : int =tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][ '''scale''' ] # Encoder-Decoder-Attention lowerCamelCase__ : int =tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention'''] lowerCamelCase__ : List[Any] =tax_enc_dec_attention_module['''key''']['''kernel'''] lowerCamelCase__ : Any =tax_enc_dec_attention_module['''out''']['''kernel'''] lowerCamelCase__ : Dict =tax_enc_dec_attention_module['''query''']['''kernel'''] lowerCamelCase__ : List[str] =tax_enc_dec_attention_module['''value''']['''kernel'''] # Layer Normalization lowerCamelCase__ : Dict =tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale'''] # MLP if split_mlp_wi: lowerCamelCase__ : str =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
lowerCamelCase__ : Any =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel'''] else: lowerCamelCase__ : List[Any] =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel'''] lowerCamelCase__ : Optional[Any] =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel'''] # Layer Normalization lowerCamelCase__ : str =tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale'''] # Assigning lowerCamelCase__ : str =flax_model.params['''decoder''']['''block'''][str(__lowerCamelCase )]['''layer'''] lowerCamelCase__ : Union[str, Any] =tax_attention_key lowerCamelCase__ : str =tax_attention_out lowerCamelCase__ : Optional[int] =tax_attention_query lowerCamelCase__ : Dict =tax_attention_value lowerCamelCase__ : List[str] =tax_pre_attention_layer_norm lowerCamelCase__ : List[Any] =tax_enc_dec_attention_key lowerCamelCase__ : Any =tax_enc_dec_attention_out lowerCamelCase__ : Any =tax_enc_dec_attention_query lowerCamelCase__ : Optional[int] =tax_enc_dec_attention_value lowerCamelCase__ : Dict =tax_cross_layer_norm if split_mlp_wi: lowerCamelCase__ : Tuple =tax_mlp_wi_a lowerCamelCase__ : int =tax_mlp_wi_a else: lowerCamelCase__ : List[Any] =tax_mlp_wi lowerCamelCase__ : Dict =tax_mlp_wo lowerCamelCase__ : Tuple =txa_mlp_layer_norm lowerCamelCase__ : Optional[Any] =flax_model_decoder_layer_block # Decoder Normalization lowerCamelCase__ : Dict =tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale'''] lowerCamelCase__ : int =txa_decoder_norm # Only for layer 0: lowerCamelCase__ : Tuple =tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T lowerCamelCase__ : Tuple =tax_decoder_rel_embedding # Token Embeddings lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''token_embedder''']['''embedding'''] lowerCamelCase__ : Dict =txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in
tax_model["target"]["decoder"]: lowerCamelCase__ : int =tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel'''] flax_model.save_pretrained(__lowerCamelCase ) print('''T5X Model was sucessfully converted!''' ) if __name__ == "__main__": _lowercase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint." ) parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.") parser.add_argument( "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model." ) _lowercase : List[Any] = parser.parse_args() convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
625
1
# ---------------------------------------------------------------------------
# InstructBLIP-style processor: wraps a BlipImageProcessor, a main
# AutoTokenizer and a second "QFormer" tokenizer.  `__call__` tokenizes the
# text with both tokenizers (the QFormer ids/mask are stored under separate
# keys) and runs the image processor, merging everything into one
# BatchFeature.  `save_pretrained` / `from_pretrained` additionally persist
# and reload the QFormer tokenizer from a `qformer_tokenizer` subfolder.
#
# NOTE(review): this block appears machine-obfuscated -- assignment targets
# were rewritten to `lowerCamelCase__`, so names that are later READ are
# never visibly bound: `encoding` (used by `encoding.update(...)` and
# returned), `qformer_text_encoding` (popped for ids/mask), and
# `save_directory` inside the f-string in the save method.  Restore from the
# upstream `InstructBlipProcessor` before relying on this class.
# NOTE(review): the save method raises when the target path is an existing
# FILE, then creates the directory -- consistent with upstream
# `save_pretrained`, but confirm after de-obfuscation.
# ---------------------------------------------------------------------------
"""simple docstring""" import os from typing import List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType from ..auto import AutoTokenizer class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' _a = ['image_processor', 'tokenizer'] _a = 'BlipImageProcessor' _a = 'AutoTokenizer' def __init__( self : str, lowerCamelCase : Dict, lowerCamelCase : Dict, lowerCamelCase : str )-> Dict: super().__init__(lowerCamelCase, lowerCamelCase ) # add QFormer tokenizer lowerCamelCase__ : str =qformer_tokenizer def __call__( self : List[Any], lowerCamelCase : ImageInput = None, lowerCamelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, lowerCamelCase : bool = True, lowerCamelCase : Union[bool, str, PaddingStrategy] = False, lowerCamelCase : Union[bool, str, TruncationStrategy] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : int = 0, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[bool] = None, lowerCamelCase : bool = False, lowerCamelCase : bool = False, lowerCamelCase : bool = False, lowerCamelCase : bool = False, lowerCamelCase : bool = False, lowerCamelCase : bool = True, lowerCamelCase : Optional[Union[str, TensorType]] = None, **lowerCamelCase : Optional[int], )-> BatchFeature: if images is None and text is None: raise ValueError('''You have to specify at least images or text.''' ) lowerCamelCase__ : Optional[int] =BatchFeature() if text is not None: lowerCamelCase__ : Tuple =self.tokenizer( text=lowerCamelCase, add_special_tokens=lowerCamelCase, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=lowerCamelCase, stride=lowerCamelCase, pad_to_multiple_of=lowerCamelCase, return_attention_mask=lowerCamelCase,
return_overflowing_tokens=lowerCamelCase, return_special_tokens_mask=lowerCamelCase, return_offsets_mapping=lowerCamelCase, return_token_type_ids=lowerCamelCase, return_length=lowerCamelCase, verbose=lowerCamelCase, return_tensors=lowerCamelCase, **lowerCamelCase, ) encoding.update(lowerCamelCase ) lowerCamelCase__ : Optional[int] =self.qformer_tokenizer( text=lowerCamelCase, add_special_tokens=lowerCamelCase, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=lowerCamelCase, stride=lowerCamelCase, pad_to_multiple_of=lowerCamelCase, return_attention_mask=lowerCamelCase, return_overflowing_tokens=lowerCamelCase, return_special_tokens_mask=lowerCamelCase, return_offsets_mapping=lowerCamelCase, return_token_type_ids=lowerCamelCase, return_length=lowerCamelCase, verbose=lowerCamelCase, return_tensors=lowerCamelCase, **lowerCamelCase, ) lowerCamelCase__ : List[Any] =qformer_text_encoding.pop('''input_ids''' ) lowerCamelCase__ : List[str] =qformer_text_encoding.pop('''attention_mask''' ) if images is not None: lowerCamelCase__ : Optional[int] =self.image_processor(lowerCamelCase, return_tensors=lowerCamelCase ) encoding.update(lowerCamelCase ) return encoding def snake_case ( self : Any, *lowerCamelCase : List[str], **lowerCamelCase : Optional[int] )-> Any: return self.tokenizer.batch_decode(*lowerCamelCase, **lowerCamelCase ) def snake_case ( self : Optional[int], *lowerCamelCase : Optional[Any], **lowerCamelCase : Dict )-> str: return self.tokenizer.decode(*lowerCamelCase, **lowerCamelCase ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def snake_case ( self : List[str] )-> int: lowerCamelCase__ : Optional[int] =self.tokenizer.model_input_names lowerCamelCase__ : Optional[Any] =self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) def snake_case ( self : str, lowerCamelCase : Tuple, **lowerCamelCase : Union[str, Any] )-> List[Any]: if
os.path.isfile(lowerCamelCase ): raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' ) os.makedirs(lowerCamelCase, exist_ok=lowerCamelCase ) lowerCamelCase__ : Any =os.path.join(lowerCamelCase, '''qformer_tokenizer''' ) self.qformer_tokenizer.save_pretrained(lowerCamelCase ) return super().save_pretrained(lowerCamelCase, **lowerCamelCase ) @classmethod def snake_case ( cls : int, lowerCamelCase : Any, **lowerCamelCase : Dict )-> List[str]: lowerCamelCase__ : int =AutoTokenizer.from_pretrained(lowerCamelCase, subfolder='''qformer_tokenizer''' ) lowerCamelCase__ : Union[str, Any] =cls._get_arguments_from_pretrained(lowerCamelCase, **lowerCamelCase ) args.append(lowerCamelCase ) return cls(*lowerCamelCase )
625
# ---------------------------------------------------------------------------
# Test suite for UperNetForSemanticSegmentation (ConvNeXt backbone):
#   1. a model-tester helper class building a small ConvNextConfig +
#      UperNetConfig and checking the semantic-segmentation logits shape,
#   2. a ModelTesterMixin/PipelineTesterMixin unittest class (config tests,
#      forward-signature test, hidden-states test, initialization test, and
#      several skipped tests for unsupported features),
#   3. `snake_case__`: downloads one ADE20k fixture image from the Hub,
#   4. slow integration tests comparing logits of the Swin-tiny and
#      ConvNeXt-tiny UperNet checkpoints against hard-coded slices.
#
# NOTE(review): this block appears machine-obfuscated -- locals are assigned
# to the throwaway name `lowerCamelCase__`, so names later READ (e.g.
# `result`, `outputs`, `hidden_states`, `image`, `processor`, `model`,
# `expected_shape`/`expected_slice`) are never visibly bound, and `setUp`
# instantiates `UperNetModelTester` although the tester class here is named
# `__SCREAMING_SNAKE_CASE`.  Restore from the upstream
# `tests/models/upernet/test_modeling_upernet.py` before running.
# ---------------------------------------------------------------------------
"""simple docstring""" import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Optional[Any], lowerCamelCase : Tuple, lowerCamelCase : List[str]=13, lowerCamelCase : List[Any]=32, lowerCamelCase : Dict=3, lowerCamelCase : int=4, lowerCamelCase : str=[10, 20, 30, 40], lowerCamelCase : Any=[2, 2, 3, 2], lowerCamelCase : int=True, lowerCamelCase : int=True, lowerCamelCase : str=37, lowerCamelCase : Optional[int]="gelu", lowerCamelCase : Optional[int]=10, lowerCamelCase : Any=0.02, lowerCamelCase : Union[str, Any]=["stage2", "stage3", "stage4"], lowerCamelCase : Optional[int]=3, lowerCamelCase : Tuple=None, )-> List[str]: lowerCamelCase__ : List[str] =parent lowerCamelCase__ : Tuple =batch_size lowerCamelCase__ : str =image_size lowerCamelCase__ : Any =num_channels lowerCamelCase__ : Tuple =num_stages lowerCamelCase__ : List[str] =hidden_sizes lowerCamelCase__ : Any =depths lowerCamelCase__ : Union[str, Any] =is_training lowerCamelCase__ : Tuple =use_labels lowerCamelCase__ : int =intermediate_size lowerCamelCase__ : Optional[int] =hidden_act lowerCamelCase__ : Dict =type_sequence_label_size lowerCamelCase__ : Tuple =initializer_range lowerCamelCase__ : Any
=out_features lowerCamelCase__ : Tuple =num_labels lowerCamelCase__ : Optional[int] =scope lowerCamelCase__ : Optional[int] =num_stages def snake_case ( self : str )-> Optional[int]: lowerCamelCase__ : str =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ : Tuple =None if self.use_labels: lowerCamelCase__ : Union[str, Any] =ids_tensor([self.batch_size], self.type_sequence_label_size ) lowerCamelCase__ : int =self.get_config() return config, pixel_values, labels def snake_case ( self : Union[str, Any] )-> Any: return ConvNextConfig( num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, ) def snake_case ( self : Union[str, Any] )-> Any: return UperNetConfig( backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=lowerCamelCase, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=lowerCamelCase, loss_ignore_index=255, num_labels=self.num_labels, ) def snake_case ( self : int, lowerCamelCase : str, lowerCamelCase : List[str], lowerCamelCase : List[Any] )-> Tuple: lowerCamelCase__ : List[str] =UperNetForSemanticSegmentation(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() lowerCamelCase__ : int =model(lowerCamelCase ) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def snake_case ( self : Any )-> Tuple: lowerCamelCase__ : Dict =self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) : Any =config_and_inputs lowerCamelCase__ : Optional[int] ={'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_
, unittest.TestCase ): '''simple docstring''' _a = (UperNetForSemanticSegmentation,) if is_torch_available() else () _a = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {} _a = False _a = False _a = False _a = False _a = False _a = False def snake_case ( self : Optional[int] )-> Optional[int]: lowerCamelCase__ : Optional[Any] =UperNetModelTester(self ) lowerCamelCase__ : Union[str, Any] =ConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase, hidden_size=37 ) def snake_case ( self : Optional[int] )-> Optional[int]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def snake_case ( self : List[str] )-> Dict: return def snake_case ( self : Optional[int] )-> List[str]: lowerCamelCase__ , lowerCamelCase__ : str =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Union[str, Any] =model_class(lowerCamelCase ) lowerCamelCase__ : Tuple =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ : Tuple =[*signature.parameters.keys()] lowerCamelCase__ : List[Any] =['''pixel_values'''] self.assertListEqual(arg_names[:1], lowerCamelCase ) def snake_case ( self : Any )-> Union[str, Any]: lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase ) @unittest.skip(reason='''UperNet does not use inputs_embeds''' ) def snake_case ( self : Optional[Any] )-> List[Any]: pass @unittest.skip(reason='''UperNet does not support input and output
embeddings''' ) def snake_case ( self : Any )-> List[str]: pass @unittest.skip(reason='''UperNet does not have a base model''' ) def snake_case ( self : int )-> Any: pass @unittest.skip(reason='''UperNet does not have a base model''' ) def snake_case ( self : Dict )-> str: pass @require_torch_multi_gpu @unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def snake_case ( self : List[Any] )-> List[str]: pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def snake_case ( self : Tuple )-> str: pass def snake_case ( self : Optional[int] )-> List[str]: def check_hidden_states_output(lowerCamelCase : Dict, lowerCamelCase : int, lowerCamelCase : List[str] ): lowerCamelCase__ : Union[str, Any] =model_class(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() with torch.no_grad(): lowerCamelCase__ : Optional[Any] =model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) ) lowerCamelCase__ : Optional[Any] =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCamelCase__ : List[str] =self.model_tester.num_stages self.assertEqual(len(lowerCamelCase ), expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], ) lowerCamelCase__ , lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Optional[int] =True check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase__ : Optional[Any] =True check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase ) def snake_case ( self : Any )-> List[Any]: lowerCamelCase__ ,
lowerCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ : str =_config_zero_init(lowerCamelCase ) lowerCamelCase__ : Union[str, Any] =_config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: lowerCamelCase__ : Optional[int] =model_class(config=lowerCamelCase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', ) @unittest.skip(reason='''UperNet does not have tied weights''' ) def snake_case ( self : Any )-> str: pass @slow def snake_case ( self : int )-> Union[str, Any]: for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : str =UperNetForSemanticSegmentation.from_pretrained(lowerCamelCase ) self.assertIsNotNone(lowerCamelCase ) def snake_case__ ( ): """simple docstring""" lowerCamelCase__ : Optional[int] =hf_hub_download( repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''' ) lowerCamelCase__ : List[str] =Image.open(__lowerCamelCase ).convert('''RGB''' ) return image @require_torch @require_vision @slow class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def snake_case ( self : str )-> Union[str, Any]: lowerCamelCase__ : List[Any] =AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' ) lowerCamelCase__ : List[Any] =UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(lowerCamelCase ) lowerCamelCase__ : List[Any] =prepare_img() lowerCamelCase__ : List[Any] =processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase ) with torch.no_grad(): lowerCamelCase__ : List[Any] =model(**lowerCamelCase ) lowerCamelCase__ : Optional[int] =torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape,
lowerCamelCase ) lowerCamelCase__ : Dict =torch.tensor( [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], lowerCamelCase, atol=1E-4 ) ) def snake_case ( self : Optional[int] )-> Optional[Any]: lowerCamelCase__ : str =AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' ) lowerCamelCase__ : Tuple =UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(lowerCamelCase ) lowerCamelCase__ : Dict =prepare_img() lowerCamelCase__ : Any =processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase ) with torch.no_grad(): lowerCamelCase__ : Any =model(**lowerCamelCase ) lowerCamelCase__ : Dict =torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape, lowerCamelCase ) lowerCamelCase__ : List[str] =torch.tensor( [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], lowerCamelCase, atol=1E-4 ) )
625
1
"""simple docstring""" from typing import List, Optional, Union import numpy as np import PIL import torch from PIL import Image from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) _lowercase : int = logging.get_logger(__name__) # pylint: disable=invalid-name _lowercase : Any = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... 
).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n" def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict=8 ): """simple docstring""" lowerCamelCase__ : List[Any] =height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 lowerCamelCase__ : List[str] =width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor def snake_case__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : Any=512 , __lowerCamelCase : Optional[int]=512 ): """simple docstring""" lowerCamelCase__ : Union[str, Any] =pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 ) lowerCamelCase__ : List[str] =np.array(pil_image.convert('''RGB''' ) ) lowerCamelCase__ : int =arr.astype(np.floataa ) / 1_27.5 - 1 lowerCamelCase__ : Optional[Any] =np.transpose(__lowerCamelCase , [2, 0, 1] ) lowerCamelCase__ : List[str] =torch.from_numpy(__lowerCamelCase ).unsqueeze(0 ) return image class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' def __init__( self : List[Any], lowerCamelCase : UNetaDConditionModel, lowerCamelCase : DDPMScheduler, lowerCamelCase : VQModel, )-> Optional[Any]: super().__init__() self.register_modules( unet=lowerCamelCase, scheduler=lowerCamelCase, movq=lowerCamelCase, ) lowerCamelCase__ : Optional[Any] =2 ** (len(self.movq.config.block_out_channels ) - 1) def snake_case ( self : Dict, lowerCamelCase : str, lowerCamelCase : int, lowerCamelCase : str )-> Dict: # get the original timestep using init_timestep lowerCamelCase__ : List[Any] =min(int(num_inference_steps * strength ), lowerCamelCase ) lowerCamelCase__ : Tuple =max(num_inference_steps - init_timestep, 0 ) lowerCamelCase__ : str =self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def snake_case ( self : Optional[int], lowerCamelCase : Dict, lowerCamelCase : int, lowerCamelCase : int, lowerCamelCase : Optional[Any], lowerCamelCase 
: Union[str, Any], lowerCamelCase : Any, lowerCamelCase : Optional[int]=None )-> Union[str, Any]: if not isinstance(lowerCamelCase, (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( F'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowerCamelCase )}''' ) lowerCamelCase__ : List[str] =image.to(device=lowerCamelCase, dtype=lowerCamelCase ) lowerCamelCase__ : Dict =batch_size * num_images_per_prompt if image.shape[1] == 4: lowerCamelCase__ : List[str] =image else: if isinstance(lowerCamelCase, lowerCamelCase ) and len(lowerCamelCase ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(lowerCamelCase )}, but requested an effective batch''' F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) elif isinstance(lowerCamelCase, lowerCamelCase ): lowerCamelCase__ : int =[ self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowerCamelCase ) ] lowerCamelCase__ : List[Any] =torch.cat(lowerCamelCase, dim=0 ) else: lowerCamelCase__ : int =self.movq.encode(lowerCamelCase ).latent_dist.sample(lowerCamelCase ) lowerCamelCase__ : Optional[Any] =self.movq.config.scaling_factor * init_latents lowerCamelCase__ : Optional[int] =torch.cat([init_latents], dim=0 ) lowerCamelCase__ : Tuple =init_latents.shape lowerCamelCase__ : Union[str, Any] =randn_tensor(lowerCamelCase, generator=lowerCamelCase, device=lowerCamelCase, dtype=lowerCamelCase ) # get latents lowerCamelCase__ : Any =self.scheduler.add_noise(lowerCamelCase, lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : Tuple =init_latents return latents def snake_case ( self : Optional[int], lowerCamelCase : int=0 )-> List[Any]: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('''Please install accelerate via `pip install accelerate`''' ) lowerCamelCase__ : Optional[Any] =torch.device(F'''cuda:{gpu_id}''' ) lowerCamelCase__ : str =[ self.unet, 
self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(lowerCamelCase, lowerCamelCase ) def snake_case ( self : List[str], lowerCamelCase : str=0 )-> List[Any]: if is_accelerate_available() and is_accelerate_version('''>=''', '''0.17.0.dev0''' ): from accelerate import cpu_offload_with_hook else: raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' ) lowerCamelCase__ : List[str] =torch.device(F'''cuda:{gpu_id}''' ) if self.device.type != "cpu": self.to('''cpu''', silence_dtype_warnings=lowerCamelCase ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) lowerCamelCase__ : str =None for cpu_offloaded_model in [self.unet, self.movq]: lowerCamelCase__ , lowerCamelCase__ : int =cpu_offload_with_hook(lowerCamelCase, lowerCamelCase, prev_module_hook=lowerCamelCase ) # We'll offload the last model manually. lowerCamelCase__ : int =hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def snake_case ( self : Optional[Any] )-> List[str]: if not hasattr(self.unet, '''_hf_hook''' ): return self.device for module in self.unet.modules(): if ( hasattr(lowerCamelCase, '''_hf_hook''' ) and hasattr(module._hf_hook, '''execution_device''' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(lowerCamelCase ) def __call__( self : List[str], lowerCamelCase : Union[torch.FloatTensor, List[torch.FloatTensor]], lowerCamelCase : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]], lowerCamelCase : Union[torch.FloatTensor, List[torch.FloatTensor]], lowerCamelCase : int = 512, lowerCamelCase : int = 512, lowerCamelCase : int = 100, lowerCamelCase : float = 4.0, lowerCamelCase : float = 0.3, lowerCamelCase : int = 1, lowerCamelCase : 
Optional[Union[torch.Generator, List[torch.Generator]]] = None, lowerCamelCase : Optional[str] = "pil", lowerCamelCase : bool = True, )-> List[str]: lowerCamelCase__ : Tuple =self._execution_device lowerCamelCase__ : str =guidance_scale > 1.0 if isinstance(lowerCamelCase, lowerCamelCase ): lowerCamelCase__ : List[str] =torch.cat(lowerCamelCase, dim=0 ) lowerCamelCase__ : Any =image_embeds.shape[0] if isinstance(lowerCamelCase, lowerCamelCase ): lowerCamelCase__ : int =torch.cat(lowerCamelCase, dim=0 ) if do_classifier_free_guidance: lowerCamelCase__ : Dict =image_embeds.repeat_interleave(lowerCamelCase, dim=0 ) lowerCamelCase__ : Optional[int] =negative_image_embeds.repeat_interleave(lowerCamelCase, dim=0 ) lowerCamelCase__ : Any =torch.cat([negative_image_embeds, image_embeds], dim=0 ).to(dtype=self.unet.dtype, device=lowerCamelCase ) if not isinstance(lowerCamelCase, lowerCamelCase ): lowerCamelCase__ : Optional[int] =[image] if not all(isinstance(lowerCamelCase, (PIL.Image.Image, torch.Tensor) ) for i in image ): raise ValueError( F'''Input is in incorrect format: {[type(lowerCamelCase ) for i in image]}. 
Currently, we only support PIL image and pytorch tensor''' ) lowerCamelCase__ : Union[str, Any] =torch.cat([prepare_image(lowerCamelCase, lowerCamelCase, lowerCamelCase ) for i in image], dim=0 ) lowerCamelCase__ : int =image.to(dtype=image_embeds.dtype, device=lowerCamelCase ) lowerCamelCase__ : Tuple =self.movq.encode(lowerCamelCase )['''latents'''] lowerCamelCase__ : int =latents.repeat_interleave(lowerCamelCase, dim=0 ) self.scheduler.set_timesteps(lowerCamelCase, device=lowerCamelCase ) lowerCamelCase__ , lowerCamelCase__ : Tuple =self.get_timesteps(lowerCamelCase, lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : Any =timesteps[:1].repeat(batch_size * num_images_per_prompt ) lowerCamelCase__ , lowerCamelCase__ : str =downscale_height_and_width(lowerCamelCase, lowerCamelCase, self.movq_scale_factor ) lowerCamelCase__ : Union[str, Any] =self.prepare_latents( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, image_embeds.dtype, lowerCamelCase, lowerCamelCase ) for i, t in enumerate(self.progress_bar(lowerCamelCase ) ): # expand the latents if we are doing classifier free guidance lowerCamelCase__ : Dict =torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents lowerCamelCase__ : Dict ={'''image_embeds''': image_embeds} lowerCamelCase__ : int =self.unet( sample=lowerCamelCase, timestep=lowerCamelCase, encoder_hidden_states=lowerCamelCase, added_cond_kwargs=lowerCamelCase, return_dict=lowerCamelCase, )[0] if do_classifier_free_guidance: lowerCamelCase__ , lowerCamelCase__ : Dict =noise_pred.split(latents.shape[1], dim=1 ) lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =noise_pred.chunk(2 ) lowerCamelCase__ , lowerCamelCase__ : int =variance_pred.chunk(2 ) lowerCamelCase__ : Optional[Any] =noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) lowerCamelCase__ : Union[str, Any] =torch.cat([noise_pred, variance_pred_text], dim=1 ) if not ( hasattr(self.scheduler.config, '''variance_type''' ) and 
self.scheduler.config.variance_type in ["learned", "learned_range"] ): lowerCamelCase__ , lowerCamelCase__ : List[str] =noise_pred.split(latents.shape[1], dim=1 ) # compute the previous noisy sample x_t -> x_t-1 lowerCamelCase__ : int =self.scheduler.step( lowerCamelCase, lowerCamelCase, lowerCamelCase, generator=lowerCamelCase, )[0] # post-processing lowerCamelCase__ : Union[str, Any] =self.movq.decode(lowerCamelCase, force_not_quantize=lowerCamelCase )['''sample'''] if output_type not in ["pt", "np", "pil"]: raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' ) if output_type in ["np", "pil"]: lowerCamelCase__ : Dict =image * 0.5 + 0.5 lowerCamelCase__ : str =image.clamp(0, 1 ) lowerCamelCase__ : List[str] =image.cpu().permute(0, 2, 3, 1 ).float().numpy() if output_type == "pil": lowerCamelCase__ : Tuple =self.numpy_to_pil(lowerCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowerCamelCase )
625
"""simple docstring""" from ..utils import DummyObject, requires_backends class __SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase_ ): '''simple docstring''' _a = ['onnx'] def __init__( self : List[str], *lowerCamelCase : Union[str, Any], **lowerCamelCase : str )-> Optional[int]: requires_backends(self, ['''onnx'''] ) @classmethod def snake_case ( cls : List[str], *lowerCamelCase : Any, **lowerCamelCase : Union[str, Any] )-> Optional[int]: requires_backends(cls, ['''onnx'''] ) @classmethod def snake_case ( cls : Union[str, Any], *lowerCamelCase : Tuple, **lowerCamelCase : Tuple )-> Optional[int]: requires_backends(cls, ['''onnx'''] )
625
1
"""simple docstring""" from ...processing_utils import ProcessorMixin class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' _a = 'WhisperFeatureExtractor' _a = 'WhisperTokenizer' def __init__( self : List[str], lowerCamelCase : Any, lowerCamelCase : Any )-> str: super().__init__(lowerCamelCase, lowerCamelCase ) lowerCamelCase__ : Dict =self.feature_extractor lowerCamelCase__ : List[Any] =False def snake_case ( self : Tuple, lowerCamelCase : Any=None, lowerCamelCase : Any=None, lowerCamelCase : List[str]=True )-> Dict: return self.tokenizer.get_decoder_prompt_ids(task=lowerCamelCase, language=lowerCamelCase, no_timestamps=lowerCamelCase ) def __call__( self : str, *lowerCamelCase : Any, **lowerCamelCase : Union[str, Any] )-> Dict: # For backward compatibility if self._in_target_context_manager: return self.current_processor(*lowerCamelCase, **lowerCamelCase ) lowerCamelCase__ : Optional[int] =kwargs.pop('''audio''', lowerCamelCase ) lowerCamelCase__ : List[Any] =kwargs.pop('''sampling_rate''', lowerCamelCase ) lowerCamelCase__ : List[Any] =kwargs.pop('''text''', lowerCamelCase ) if len(lowerCamelCase ) > 0: lowerCamelCase__ : List[str] =args[0] lowerCamelCase__ : List[str] =args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if audio is not None: lowerCamelCase__ : Dict =self.feature_extractor(lowerCamelCase, *lowerCamelCase, sampling_rate=lowerCamelCase, **lowerCamelCase ) if text is not None: lowerCamelCase__ : Union[str, Any] =self.tokenizer(lowerCamelCase, **lowerCamelCase ) if text is None: return inputs elif audio is None: return encodings else: lowerCamelCase__ : Dict =encodings['''input_ids'''] return inputs def snake_case ( self : List[Any], *lowerCamelCase : Optional[Any], **lowerCamelCase : Union[str, Any] )-> Dict: return self.tokenizer.batch_decode(*lowerCamelCase, **lowerCamelCase ) def snake_case ( self : Dict, *lowerCamelCase : Tuple, 
**lowerCamelCase : str )-> str: return self.tokenizer.decode(*lowerCamelCase, **lowerCamelCase ) def snake_case ( self : Optional[Any], lowerCamelCase : str, lowerCamelCase : List[Any]="np" )-> Tuple: return self.tokenizer.get_prompt_ids(lowerCamelCase, return_tensors=lowerCamelCase )
625
"""simple docstring""" import colorsys from PIL import Image # type: ignore def snake_case__ ( __lowerCamelCase : float , __lowerCamelCase : float , __lowerCamelCase : int ): """simple docstring""" lowerCamelCase__ : Optional[Any] =x lowerCamelCase__ : Any =y for step in range(__lowerCamelCase ): # noqa: B007 lowerCamelCase__ : List[Any] =a * a - b * b + x lowerCamelCase__ : Optional[int] =2 * a * b + y lowerCamelCase__ : Union[str, Any] =a_new # divergence happens for all complex number with an absolute value # greater than 4 if a * a + b * b > 4: break return step / (max_step - 1) def snake_case__ ( __lowerCamelCase : float ): """simple docstring""" if distance == 1: return (0, 0, 0) else: return (255, 255, 255) def snake_case__ ( __lowerCamelCase : float ): """simple docstring""" if distance == 1: return (0, 0, 0) else: return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(__lowerCamelCase , 1 , 1 ) ) def snake_case__ ( __lowerCamelCase : int = 800 , __lowerCamelCase : int = 600 , __lowerCamelCase : float = -0.6 , __lowerCamelCase : float = 0 , __lowerCamelCase : float = 3.2 , __lowerCamelCase : int = 50 , __lowerCamelCase : bool = True , ): """simple docstring""" lowerCamelCase__ : Optional[Any] =Image.new('''RGB''' , (image_width, image_height) ) lowerCamelCase__ : Optional[int] =img.load() # loop through the image-coordinates for image_x in range(__lowerCamelCase ): for image_y in range(__lowerCamelCase ): # determine the figure-coordinates based on the image-coordinates lowerCamelCase__ : Optional[Any] =figure_width / image_width * image_height lowerCamelCase__ : Dict =figure_center_x + (image_x / image_width - 0.5) * figure_width lowerCamelCase__ : Optional[int] =figure_center_y + (image_y / image_height - 0.5) * figure_height lowerCamelCase__ : Any =get_distance(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # color the corresponding pixel based on the selected coloring-function if use_distance_color_coding: lowerCamelCase__ : int 
=get_color_coded_rgb(__lowerCamelCase ) else: lowerCamelCase__ : Optional[int] =get_black_and_white_rgb(__lowerCamelCase ) return img if __name__ == "__main__": import doctest doctest.testmod() # colored version, full figure _lowercase : Optional[Any] = get_image() # uncomment for colored version, different section, zoomed in # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4, # figure_width = 0.8) # uncomment for black and white version, full figure # img = get_image(use_distance_color_coding = False) # uncomment to save the image # img.save("mandelbrot.png") img.show()
625
1