code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import GLPNImageProcessor class _lowercase ( unittest.TestCase ): def __init__( self: Any , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Union[str, Any]=7 , UpperCamelCase__: Dict=3 , UpperCamelCase__: str=18 , UpperCamelCase__: str=30 , UpperCamelCase__: str=400 , UpperCamelCase__: List[Any]=True , UpperCamelCase__: Optional[Any]=32 , UpperCamelCase__: Union[str, Any]=True , ): lowerCamelCase__ : List[Any] = parent lowerCamelCase__ : Union[str, Any] = batch_size lowerCamelCase__ : Dict = num_channels lowerCamelCase__ : Tuple = image_size lowerCamelCase__ : Dict = min_resolution lowerCamelCase__ : List[Any] = max_resolution lowerCamelCase__ : Union[str, Any] = do_resize lowerCamelCase__ : int = size_divisor lowerCamelCase__ : List[Any] = do_rescale def lowerCamelCase_ ( self: List[str] ): return { "do_resize": self.do_resize, "size_divisor": self.size_divisor, "do_rescale": self.do_rescale, } @require_torch @require_vision class _lowercase ( _lowercase , unittest.TestCase ): a = GLPNImageProcessor if is_vision_available() else None def lowerCamelCase_ ( self: str ): lowerCamelCase__ : List[str] = GLPNImageProcessingTester(self ) @property def lowerCamelCase_ ( self: Dict ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase_ ( self: Optional[int] ): lowerCamelCase__ : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase__ , """do_resize""" ) ) self.assertTrue(hasattr(UpperCamelCase__ , """size_divisor""" ) ) self.assertTrue(hasattr(UpperCamelCase__ , """resample""" ) ) 
self.assertTrue(hasattr(UpperCamelCase__ , """do_rescale""" ) ) def lowerCamelCase_ ( self: List[Any] ): pass def lowerCamelCase_ ( self: Union[str, Any] ): # Initialize image_processing lowerCamelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , Image.Image ) # Test not batched input (GLPNImageProcessor doesn't support batching) lowerCamelCase__ : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def lowerCamelCase_ ( self: int ): # Initialize image_processing lowerCamelCase__ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , np.ndarray ) # Test not batched input (GLPNImageProcessor doesn't support batching) lowerCamelCase__ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def lowerCamelCase_ ( self: List[Any] ): # Initialize image_processing lowerCamelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ ) for image in image_inputs: 
self.assertIsInstance(UpperCamelCase__ , torch.Tensor ) # Test not batched input (GLPNImageProcessor doesn't support batching) lowerCamelCase__ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
631
'''simple docstring''' from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
631
1
'''simple docstring''' import argparse import math import traceback import dateutil.parser as date_parser import requests def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> str: lowerCamelCase__ : List[str] = {} lowerCamelCase__ : Optional[Any] = job["""started_at"""] lowerCamelCase__ : List[Any] = job["""completed_at"""] lowerCamelCase__ : Union[str, Any] = date_parser.parse(UpperCamelCase ) lowerCamelCase__ : Dict = date_parser.parse(UpperCamelCase ) lowerCamelCase__ : int = round((end_datetime - start_datetime).total_seconds() / 60.0 ) lowerCamelCase__ : Union[str, Any] = start lowerCamelCase__ : Any = end lowerCamelCase__ : List[Any] = duration_in_min return job_info def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase=None ) -> Tuple: lowerCamelCase__ : Optional[Any] = None if token is not None: lowerCamelCase__ : List[Any] = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''} lowerCamelCase__ : Optional[Any] = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100''' lowerCamelCase__ : Optional[int] = requests.get(UpperCamelCase , headers=UpperCamelCase ).json() lowerCamelCase__ : List[Any] = {} try: job_time.update({job["""name"""]: extract_time_from_single_job(UpperCamelCase ) for job in result["""jobs"""]} ) lowerCamelCase__ : List[str] = math.ceil((result["""total_count"""] - 100) / 100 ) for i in range(UpperCamelCase ): lowerCamelCase__ : Optional[int] = requests.get(url + f'''&page={i + 2}''' , headers=UpperCamelCase ).json() job_time.update({job["""name"""]: extract_time_from_single_job(UpperCamelCase ) for job in result["""jobs"""]} ) return job_time except Exception: print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' ) return {} if __name__ == "__main__": _A : List[str] =argparse.ArgumentParser() # Required parameters parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run 
id.''') _A : Dict =parser.parse_args() _A : Optional[Any] =get_job_time(args.workflow_run_id) _A : Optional[int] =dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True)) for k, v in job_time.items(): print(F'{k}: {v["duration"]}')
631
'''simple docstring''' import inspect import math import tempfile import unittest import numpy as np from transformers import ViTMAEConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMAEForPreTraining, ViTMAEModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _lowercase : def __init__( self: List[str] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[str]=13 , UpperCamelCase__: Optional[int]=30 , UpperCamelCase__: Optional[int]=2 , UpperCamelCase__: List[str]=3 , UpperCamelCase__: List[str]=True , UpperCamelCase__: Any=True , UpperCamelCase__: List[Any]=32 , UpperCamelCase__: Any=5 , UpperCamelCase__: Optional[Any]=4 , UpperCamelCase__: Dict=37 , UpperCamelCase__: List[Any]="gelu" , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: List[Any]=10 , UpperCamelCase__: Tuple=0.02 , UpperCamelCase__: Optional[int]=3 , UpperCamelCase__: Dict=0.6 , UpperCamelCase__: int=None , ): lowerCamelCase__ : Dict = parent lowerCamelCase__ : Optional[Any] = batch_size lowerCamelCase__ : Optional[int] = image_size lowerCamelCase__ : Optional[Any] = patch_size lowerCamelCase__ : Any = num_channels lowerCamelCase__ : Any = is_training lowerCamelCase__ : Union[str, Any] = use_labels lowerCamelCase__ : List[str] = hidden_size lowerCamelCase__ : List[str] = num_hidden_layers lowerCamelCase__ : List[Any] = num_attention_heads lowerCamelCase__ : str = intermediate_size lowerCamelCase__ : str = 
hidden_act lowerCamelCase__ : Any = hidden_dropout_prob lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob lowerCamelCase__ : List[Any] = type_sequence_label_size lowerCamelCase__ : int = initializer_range lowerCamelCase__ : List[str] = mask_ratio lowerCamelCase__ : List[Any] = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) lowerCamelCase__ : str = (image_size // patch_size) ** 2 lowerCamelCase__ : Optional[int] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def lowerCamelCase_ ( self: Optional[int] ): lowerCamelCase__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ : Optional[int] = None if self.use_labels: lowerCamelCase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : Any = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self: str ): return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[int] , UpperCamelCase__: int ): lowerCamelCase__ : Tuple = ViTMAEModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : List[Any] = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase_ ( self: str , UpperCamelCase__: Union[str, 
Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Dict ): lowerCamelCase__ : int = ViTMAEForPreTraining(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ ) lowerCamelCase__ : Any = (self.image_size // self.patch_size) ** 2 lowerCamelCase__ : str = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images lowerCamelCase__ : Dict = 1 lowerCamelCase__ : Optional[int] = ViTMAEForPreTraining(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ ) lowerCamelCase__ : Tuple = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : Optional[int] = self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any = config_and_inputs lowerCamelCase__ : Optional[int] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _lowercase ( _lowercase , _lowercase , unittest.TestCase ): a = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else () a = {"""feature-extraction""": ViTMAEModel} if is_torch_available() else {} a = False a = False a = False a = False def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : Tuple = ViTMAEModelTester(self ) lowerCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 ) def lowerCamelCase_ ( self: Union[str, Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" ) def lowerCamelCase_ ( self: Dict ): pass def lowerCamelCase_ ( self: List[str] ): lowerCamelCase__ , lowerCamelCase__ : str = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : str = model_class(UpperCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCamelCase__ : Optional[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) ) def lowerCamelCase_ ( self: List[Any] ): lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Any = model_class(UpperCamelCase__ ) lowerCamelCase__ : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ : Any = [*signature.parameters.keys()] lowerCamelCase__ : List[str] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Dict , UpperCamelCase__: Optional[int] ): # make masks reproducible np.random.seed(2 ) lowerCamelCase__ : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 ) lowerCamelCase__ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowerCamelCase__ : Tuple = torch.from_numpy(UpperCamelCase__ ) # Add `noise` argument. 
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument lowerCamelCase__ : Tuple = pt_noise super().check_pt_tf_models(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def lowerCamelCase_ ( self: str ): lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): lowerCamelCase__ : Optional[int] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) lowerCamelCase__ : Optional[int] = outputs[0].cpu().numpy() lowerCamelCase__ : List[str] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase__ ) lowerCamelCase__ : List[str] = model_class.from_pretrained(UpperCamelCase__ ) model.to(UpperCamelCase__ ) # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): lowerCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) # Make sure we don't have nans lowerCamelCase__ : Dict = after_outputs[0].cpu().numpy() lowerCamelCase__ : Tuple = 0 lowerCamelCase__ : Union[str, Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(UpperCamelCase__ , 1e-5 ) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def lowerCamelCase_ ( self: int ): pass @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def lowerCamelCase_ ( self: Any ): pass @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. 
See test_save_load to get deterministic results.""" ) def lowerCamelCase_ ( self: List[str] ): pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" ) def lowerCamelCase_ ( self: Tuple ): pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowerCamelCase_ ( self: Optional[int] ): pass @slow def lowerCamelCase_ ( self: List[str] ): for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : Tuple = ViTMAEModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE_ () -> List[Any]: lowerCamelCase__ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class _lowercase ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self: List[str] ): return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None @slow def lowerCamelCase_ ( self: Tuple ): # make random mask reproducible across the PT and TF model np.random.seed(2 ) lowerCamelCase__ : str = ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(UpperCamelCase__ ) lowerCamelCase__ : Tuple = self.default_image_processor lowerCamelCase__ : List[str] = prepare_img() lowerCamelCase__ : int = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) lowerCamelCase__ : List[str] = ViTMAEConfig() lowerCamelCase__ : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) lowerCamelCase__ : Any = np.random.uniform(size=(1, num_patches) ) # forward pass with torch.no_grad(): lowerCamelCase__ : List[Any] = model(**UpperCamelCase__ , noise=torch.from_numpy(UpperCamelCase__ ).to(device=UpperCamelCase__ ) 
) # verify the logits lowerCamelCase__ : List[str] = torch.Size((1, 196, 768) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) lowerCamelCase__ : str = torch.tensor( [[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(UpperCamelCase__ ) , atol=1e-4 ) )
631
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class _lowercase ( unittest.TestCase ): @slow def lowerCamelCase_ ( self: int ): lowerCamelCase__ : Union[str, Any] = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" ) lowerCamelCase__ : Optional[Any] = tf.convert_to_tensor( [[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" lowerCamelCase__ : Any = model(UpperCamelCase__ )["""last_hidden_state"""] lowerCamelCase__ : Optional[int] = tf.TensorShape((1, 10, 768) ) self.assertEqual(output.shape , UpperCamelCase__ ) # compare the actual values for a slice. lowerCamelCase__ : str = tf.convert_to_tensor( [[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
631
'''simple docstring''' import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class _lowercase ( _lowercase ): a = """""" a = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) a = None # compression type in fsspec. ex: "gzip" a = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self: str , UpperCamelCase__: str = "" , UpperCamelCase__: Optional[str] = None , UpperCamelCase__: Optional[dict] = None , **UpperCamelCase__: List[Any] ): super().__init__(self , **UpperCamelCase__ ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode lowerCamelCase__ : List[Any] = fsspec.open( UpperCamelCase__ , mode="""rb""" , protocol=UpperCamelCase__ , compression=self.compression , client_kwargs={ """requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459 """trust_env""": True, # Enable reading proxy env variables. **(target_options or {}).pop("""client_kwargs""" , {} ), # To avoid issues if it was already passed. 
} , **(target_options or {}) , ) lowerCamelCase__ : str = os.path.basename(self.file.path.split("""::""" )[0] ) lowerCamelCase__ : Union[str, Any] = ( self.compressed_name[: self.compressed_name.rindex(""".""" )] if """.""" in self.compressed_name else self.compressed_name ) lowerCamelCase__ : Tuple = None @classmethod def lowerCamelCase_ ( cls: Optional[int] , UpperCamelCase__: Optional[int] ): # compressed file paths are always relative to the archive root return super()._strip_protocol(UpperCamelCase__ ).lstrip("""/""" ) def lowerCamelCase_ ( self: Tuple ): if self.dir_cache is None: lowerCamelCase__ : Dict = {**self.file.fs.info(self.file.path ), """name""": self.uncompressed_name} lowerCamelCase__ : int = {f["""name"""]: f} def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: str ): return self.file.open().read() def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: str , UpperCamelCase__: str = "rb" , UpperCamelCase__: Optional[int]=None , UpperCamelCase__: Tuple=True , UpperCamelCase__: Tuple=None , **UpperCamelCase__: Optional[Any] , ): lowerCamelCase__ : Union[str, Any] = self._strip_protocol(UpperCamelCase__ ) if mode != "rb": raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' ) return self.file.open() class _lowercase ( _lowercase ): a = """bz2""" a = """bz2""" a = """.bz2""" class _lowercase ( _lowercase ): a = """gzip""" a = """gzip""" a = """.gz""" class _lowercase ( _lowercase ): a = """lz4""" a = """lz4""" a = """.lz4""" class _lowercase ( _lowercase ): a = """xz""" a = """xz""" a = """.xz""" class _lowercase ( _lowercase ): a = """zstd""" a = """zstd""" a = """.zst""" def __init__( self: int , UpperCamelCase__: str , UpperCamelCase__: str = "rb" , UpperCamelCase__: Optional[str] = None , UpperCamelCase__: Optional[dict] = None , UpperCamelCase__: int = DEFAULT_BLOCK_SIZE , **UpperCamelCase__: Dict , ): super().__init__( fo=UpperCamelCase__ , mode=UpperCamelCase__ , 
target_protocol=UpperCamelCase__ , target_options=UpperCamelCase__ , block_size=UpperCamelCase__ , **UpperCamelCase__ , ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 lowerCamelCase__ : Tuple = self.file.__enter__ class _lowercase : def __init__( self: Optional[int] , UpperCamelCase__: Any ): lowerCamelCase__ : Optional[int] = file_ def __enter__( self: List[Any] ): self._file.__enter__() return self def __exit__( self: Any , *UpperCamelCase__: str , **UpperCamelCase__: Any ): self._file.__exit__(*UpperCamelCase__ , **UpperCamelCase__ ) def __iter__( self: Any ): return iter(self._file ) def lowerCamelCase_ ( self: List[Any] ): return next(self._file ) def __getattr__( self: List[str] , UpperCamelCase__: Dict ): return getattr(self._file , UpperCamelCase__ ) def fixed_enter(*UpperCamelCase__: Union[str, Any] , **UpperCamelCase__: List[str] ): return WrappedFile(_enter(*UpperCamelCase__ , **UpperCamelCase__ ) ) lowerCamelCase__ : Optional[Any] = fixed_enter
631
1
'''simple docstring''' import collections.abc from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_poolformer import PoolFormerConfig _A : Any =logging.get_logger(__name__) # General docstring _A : int ='''PoolFormerConfig''' # Base docstring _A : int ='''sail/poolformer_s12''' _A : Optional[int] =[1, 512, 7, 7] # Image classification docstring _A : List[str] ='''sail/poolformer_s12''' _A : Optional[Any] ='''tabby, tabby cat''' _A : Union[str, Any] =[ '''sail/poolformer_s12''', # See all PoolFormer models at https://huggingface.co/models?filter=poolformer ] def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase = 0.0 , UpperCamelCase = False ) -> Any: if drop_prob == 0.0 or not training: return input lowerCamelCase__ : Optional[int] = 1 - drop_prob lowerCamelCase__ : Union[str, Any] = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets lowerCamelCase__ : Optional[Any] = keep_prob + torch.rand(UpperCamelCase , dtype=input.dtype , device=input.device ) random_tensor.floor_() # binarize lowerCamelCase__ : Optional[int] = input.div(UpperCamelCase ) * random_tensor return output class _lowercase ( nn.Module ): def __init__( self: List[Any] , UpperCamelCase__: Optional[float] = None ): super().__init__() lowerCamelCase__ : Tuple = drop_prob def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: torch.Tensor ): return drop_path(UpperCamelCase__ , self.drop_prob , self.training ) def lowerCamelCase_ ( self: Tuple ): return "p={}".format(self.drop_prob ) class _lowercase ( nn.Module ): def 
__init__( self: Optional[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Dict , UpperCamelCase__: List[str] , UpperCamelCase__: Any=None ): super().__init__() lowerCamelCase__ : str = patch_size if isinstance(UpperCamelCase__ , collections.abc.Iterable ) else (patch_size, patch_size) lowerCamelCase__ : str = stride if isinstance(UpperCamelCase__ , collections.abc.Iterable ) else (stride, stride) lowerCamelCase__ : List[str] = padding if isinstance(UpperCamelCase__ , collections.abc.Iterable ) else (padding, padding) lowerCamelCase__ : Any = nn.Convad(UpperCamelCase__ , UpperCamelCase__ , kernel_size=UpperCamelCase__ , stride=UpperCamelCase__ , padding=UpperCamelCase__ ) lowerCamelCase__ : Dict = norm_layer(UpperCamelCase__ ) if norm_layer else nn.Identity() def lowerCamelCase_ ( self: Any , UpperCamelCase__: Any ): lowerCamelCase__ : List[str] = self.projection(UpperCamelCase__ ) lowerCamelCase__ : List[Any] = self.norm(UpperCamelCase__ ) return embeddings class _lowercase ( nn.GroupNorm ): def __init__( self: Optional[int] , UpperCamelCase__: Dict , **UpperCamelCase__: Optional[int] ): super().__init__(1 , UpperCamelCase__ , **UpperCamelCase__ ) class _lowercase ( nn.Module ): def __init__( self: Any , UpperCamelCase__: Optional[int] ): super().__init__() lowerCamelCase__ : Tuple = nn.AvgPoolad(UpperCamelCase__ , stride=1 , padding=pool_size // 2 , count_include_pad=UpperCamelCase__ ) def lowerCamelCase_ ( self: Any , UpperCamelCase__: Tuple ): return self.pool(UpperCamelCase__ ) - hidden_states class _lowercase ( nn.Module ): def __init__( self: int , UpperCamelCase__: str , UpperCamelCase__: int , UpperCamelCase__: Tuple , UpperCamelCase__: List[Any] ): super().__init__() lowerCamelCase__ : str = nn.Convad(UpperCamelCase__ , UpperCamelCase__ , 1 ) lowerCamelCase__ : Optional[int] = nn.Convad(UpperCamelCase__ , UpperCamelCase__ , 1 ) lowerCamelCase__ : Optional[Any] = 
PoolFormerDropPath(UpperCamelCase__ ) if isinstance(config.hidden_act , UpperCamelCase__ ): lowerCamelCase__ : Tuple = ACTaFN[config.hidden_act] else: lowerCamelCase__ : int = config.hidden_act def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: Optional[int] ): lowerCamelCase__ : Tuple = self.conva(UpperCamelCase__ ) lowerCamelCase__ : Tuple = self.act_fn(UpperCamelCase__ ) lowerCamelCase__ : Tuple = self.drop(UpperCamelCase__ ) lowerCamelCase__ : str = self.conva(UpperCamelCase__ ) lowerCamelCase__ : Tuple = self.drop(UpperCamelCase__ ) return hidden_states class _lowercase ( nn.Module ): def __init__( self: str , UpperCamelCase__: Dict , UpperCamelCase__: Dict , UpperCamelCase__: Tuple , UpperCamelCase__: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] ): super().__init__() lowerCamelCase__ : List[Any] = PoolFormerPooling(UpperCamelCase__ ) lowerCamelCase__ : Tuple = PoolFormerOutput(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : Optional[Any] = PoolFormerGroupNorm(UpperCamelCase__ ) lowerCamelCase__ : Tuple = PoolFormerGroupNorm(UpperCamelCase__ ) # Useful for training neural nets lowerCamelCase__ : Dict = PoolFormerDropPath(UpperCamelCase__ ) if drop_path > 0.0 else nn.Identity() lowerCamelCase__ : Dict = config.use_layer_scale if config.use_layer_scale: lowerCamelCase__ : Dict = nn.Parameter( config.layer_scale_init_value * torch.ones((UpperCamelCase__) ) , requires_grad=UpperCamelCase__ ) lowerCamelCase__ : Optional[int] = nn.Parameter( config.layer_scale_init_value * torch.ones((UpperCamelCase__) ) , requires_grad=UpperCamelCase__ ) def lowerCamelCase_ ( self: Any , UpperCamelCase__: int ): if self.use_layer_scale: lowerCamelCase__ : Any = self.pooling(self.before_norm(UpperCamelCase__ ) ) lowerCamelCase__ : Any = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output # First residual connection lowerCamelCase__ : Tuple = hidden_states + 
# NOTE(review): Extraction-collapsed chunk of a PoolFormer modeling file
# (HF-transformers style). It contains, in order: the tail of a layer
# forward pass (its head lies before this chunk — incomplete here), an
# encoder module that builds patch embeddings and stacked pooling blocks
# with a stochastic-depth (drop-path) decay rule, a PreTrainedModel
# subclass with normal/zero weight initialization, the model docstrings,
# the bare PoolFormer model wrapper, a small dense final-pooler head, and
# an image-classification head using the standard regression /
# single-label / multi-label problem_type loss selection.
# The original newlines were lost in extraction, so the lines below are
# not valid Python as-is (obfuscated names such as `lowerCamelCase__`
# shadow each other while later code references the real identifiers);
# code tokens are preserved byte-identical below — do not edit without
# recovering the pristine file.
self.drop_path(UpperCamelCase__ ) lowerCamelCase__ : int = () lowerCamelCase__ : Any = self.output(self.after_norm(UpperCamelCase__ ) ) lowerCamelCase__ : List[Any] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output # Second residual connection lowerCamelCase__ : Dict = hidden_states + self.drop_path(UpperCamelCase__ ) lowerCamelCase__ : Optional[Any] = (output,) + outputs return outputs else: lowerCamelCase__ : List[str] = self.drop_path(self.pooling(self.before_norm(UpperCamelCase__ ) ) ) # First residual connection lowerCamelCase__ : int = pooling_output + hidden_states lowerCamelCase__ : int = () # Second residual connection inside the PoolFormerOutput block lowerCamelCase__ : Optional[Any] = self.drop_path(self.output(self.after_norm(UpperCamelCase__ ) ) ) lowerCamelCase__ : Tuple = hidden_states + layer_output lowerCamelCase__ : Union[str, Any] = (output,) + outputs return outputs class _lowercase ( nn.Module ): def __init__( self: Union[str, Any] , UpperCamelCase__: int ): super().__init__() lowerCamelCase__ : Tuple = config # stochastic depth decay rule lowerCamelCase__ : Optional[int] = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )] # patch embeddings lowerCamelCase__ : Dict = [] for i in range(config.num_encoder_blocks ): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) ) lowerCamelCase__ : Optional[Any] = nn.ModuleList(UpperCamelCase__ ) # Transformer blocks lowerCamelCase__ : List[str] = [] lowerCamelCase__ : Any = 0 for i in range(config.num_encoder_blocks ): # each block consists of layers lowerCamelCase__ : str = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i] ): layers.append( PoolFormerLayer( UpperCamelCase__ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , 
hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) ) blocks.append(nn.ModuleList(UpperCamelCase__ ) ) lowerCamelCase__ : List[str] = nn.ModuleList(UpperCamelCase__ ) def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: int , UpperCamelCase__: Union[str, Any]=False , UpperCamelCase__: Optional[Any]=True ): lowerCamelCase__ : List[Any] = () if output_hidden_states else None lowerCamelCase__ : List[str] = pixel_values for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ): lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = layers # Get patch embeddings from hidden_states lowerCamelCase__ : List[Any] = embedding_layer(UpperCamelCase__ ) # Send the embeddings through the blocks for _, blk in enumerate(UpperCamelCase__ ): lowerCamelCase__ : Any = blk(UpperCamelCase__ ) lowerCamelCase__ : Tuple = layer_outputs[0] if output_hidden_states: lowerCamelCase__ : Tuple = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=UpperCamelCase__ , hidden_states=UpperCamelCase__ ) class _lowercase ( _lowercase ): a = PoolFormerConfig a = """poolformer""" a = """pixel_values""" a = True def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: Union[str, Any] ): if isinstance(UpperCamelCase__ , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(UpperCamelCase__ , nn.LayerNorm ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) def lowerCamelCase_ ( self: Tuple , UpperCamelCase__: int , UpperCamelCase__: Union[str, Any]=False ): if isinstance(UpperCamelCase__ , UpperCamelCase__ ): lowerCamelCase__ : Optional[Any] = value _A : Any =r''' This model is a PyTorch 
[torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. ''' _A : List[Any] =r''' Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`PoolFormerImageProcessor.__call__`] for details. ''' @add_start_docstrings( """The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.""" , _lowercase , ) class _lowercase ( _lowercase ): def __init__( self: str , UpperCamelCase__: str ): super().__init__(UpperCamelCase__ ) lowerCamelCase__ : Optional[Any] = config lowerCamelCase__ : List[str] = PoolFormerEncoder(UpperCamelCase__ ) # Initialize weights and apply final processing self.post_init() def lowerCamelCase_ ( self: List[str] ): return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(UpperCamelCase__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCamelCase__ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowerCamelCase_ ( self: str , UpperCamelCase__: Optional[torch.FloatTensor] = None , UpperCamelCase__: Optional[bool] = None , UpperCamelCase__: Optional[bool] = None , ): lowerCamelCase__ : Dict = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowerCamelCase__ : List[str] = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("""You have to 
specify pixel_values""" ) lowerCamelCase__ : List[Any] = self.encoder( UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , return_dict=UpperCamelCase__ , ) lowerCamelCase__ : Any = encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=UpperCamelCase__ , hidden_states=encoder_outputs.hidden_states , ) class _lowercase ( nn.Module ): def __init__( self: Tuple , UpperCamelCase__: Optional[int] ): super().__init__() lowerCamelCase__ : Any = nn.Linear(config.hidden_size , config.hidden_size ) def lowerCamelCase_ ( self: Dict , UpperCamelCase__: Optional[Any] ): lowerCamelCase__ : Union[str, Any] = self.dense(UpperCamelCase__ ) return output @add_start_docstrings( """ PoolFormer Model transformer with an image classification head on top """ , _lowercase , ) class _lowercase ( _lowercase ): def __init__( self: Any , UpperCamelCase__: Optional[int] ): super().__init__(UpperCamelCase__ ) lowerCamelCase__ : str = config.num_labels lowerCamelCase__ : Dict = PoolFormerModel(UpperCamelCase__ ) # Final norm lowerCamelCase__ : Optional[int] = PoolFormerGroupNorm(config.hidden_sizes[-1] ) # Classifier head lowerCamelCase__ : Dict = ( nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UpperCamelCase__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCamelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowerCamelCase_ ( self: Tuple , UpperCamelCase__: Optional[torch.FloatTensor] = None , UpperCamelCase__: Optional[torch.LongTensor] = None , UpperCamelCase__: Optional[bool] = None , UpperCamelCase__: Optional[bool] = None , ): lowerCamelCase__ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict lowerCamelCase__ : 
List[Any] = self.poolformer( UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , return_dict=UpperCamelCase__ , ) lowerCamelCase__ : int = outputs[0] lowerCamelCase__ : str = self.classifier(self.norm(UpperCamelCase__ ).mean([-2, -1] ) ) lowerCamelCase__ : str = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: lowerCamelCase__ : Optional[Any] = """regression""" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): lowerCamelCase__ : List[str] = """single_label_classification""" else: lowerCamelCase__ : str = """multi_label_classification""" if self.config.problem_type == "regression": lowerCamelCase__ : Optional[int] = MSELoss() if self.num_labels == 1: lowerCamelCase__ : Union[str, Any] = loss_fct(logits.squeeze() , labels.squeeze() ) else: lowerCamelCase__ : Optional[Any] = loss_fct(UpperCamelCase__ , UpperCamelCase__ ) elif self.config.problem_type == "single_label_classification": lowerCamelCase__ : Dict = CrossEntropyLoss() lowerCamelCase__ : int = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": lowerCamelCase__ : Optional[int] = BCEWithLogitsLoss() lowerCamelCase__ : Optional[int] = loss_fct(UpperCamelCase__ , UpperCamelCase__ ) if not return_dict: lowerCamelCase__ : Optional[Any] = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=UpperCamelCase__ , logits=UpperCamelCase__ , hidden_states=outputs.hidden_states )
631
"""Convert original MobileViTV2 (mlcvnets) checkpoints to the HuggingFace format."""

import argparse
import collections
import json
from pathlib import Path

import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileViTImageProcessor,
    MobileViTVaConfig,
    MobileViTVaForImageClassification,
    MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_orig_config_file(config_path):
    """Load the original YAML config and return it as a flat argparse.Namespace.

    Nested mappings are flattened into dotted attribute names (e.g.
    ``model.classification.name``), matching how the values are later read
    back with ``getattr(config, "model.classification.name", ...)``.
    """
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        # Recursively flatten nested mappings into a single dict with
        # dotted keys.
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(config_path, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)
            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(config_path, str(exc)))
    return config


def get_mobilevitva_config(task_name, orig_cfg_file):
    """Build a MobileViTVaConfig for *task_name* from the original YAML config.

    Raises AssertionError when the original config does not describe a
    supported mobilevit_v2 model.
    """
    config = MobileViTVaConfig()
    is_segmentation_model = False

    # dataset: number of labels, input resolution and id2label file depend
    # on the task the checkpoint was trained for.
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            # NOTE(review): the attribute names below follow the standard
            # MobileViTV2 config fields for the DeepLabV3 head — confirm
            # against the MobileViTVaConfig definition.
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def rename_key(dct, old, new):
    """Move the value stored under key *old* to key *new* (in place)."""
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict, base_model=False):
    """Return (old_key, new_key) pairs mapping original mlcvnets parameter
    names onto the HuggingFace MobileViTV2 naming scheme.

    When *base_model* is False the keys are prefixed with "mobilevitv2.".
    """
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        # layers 1-2 are plain (inverted-residual) stages
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        # layers 3-5 are MobileViT stages: downsampling + local conv +
        # global transformer representation
        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            # number of transformer layers per stage
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.",
                        f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.",
                    )
            # the entry after the last transformer layer is the final norm
            # (relies on j keeping its value after the loop above)
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.",
                    f"{model_prefix}encoder.layer.{i-1}.layernorm.",
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys


def remove_unused_keys(state_dict):
    """Drop parameters of the auxiliary segmentation head (in place) —
    the HF model does not use them."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)


def prepare_img():
    """Download the standard COCO cats test image used for verification."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    """Convert an original checkpoint, sanity-check its outputs on a test
    image, and save model + image processor to *pytorch_dump_folder_path*."""
    config = get_mobilevitva_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task",
        default="imagenet1k_256",
        type=str,
        help=(
            "Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
            """
            Classification (ImageNet-1k)
                - MobileViTV2 (256x256) : imagenet1k_256
                - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
                - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) : imagenet21k_to_1k_256
                - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
                  ImageNet-1k 384x384) : imagenet21k_to_1k_384
            Segmentation
                - ADE20K Dataset : ade20k_deeplabv3
                - Pascal VOC 2012 Dataset: voc_deeplabv3
            """
        ),
        choices=[
            "imagenet1k_256",
            "imagenet1k_384",
            "imagenet21k_to_1k_256",
            "imagenet21k_to_1k_384",
            "ade20k_deeplabv3",
            "voc_deeplabv3",
        ],
    )
    parser.add_argument(
        "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_mobilevitva_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
631
1
# NOTE(review): Extraction-collapsed chunk containing a MarianTokenizer
# test suite: sentencepiece fixture setup, special-token / vocab checks,
# a pretrained opus-mt-en-de tokenization round-trip with save/reload,
# padding/truncation to max length 512, a slow integration test against
# hard-coded token-id and attention-mask arrays, and a two-vocab
# source/target tokenization + decode check.
# The original newlines were lost in extraction, so the lines below are
# not valid Python as-is (duplicated obfuscated identifiers such as
# `lowerCamelCase__` and `UpperCamelCase__`); code tokens are preserved
# byte-identical below — do not edit without recovering the pristine file.
'''simple docstring''' import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin _A : int =get_tests_dir('''fixtures/test_sentencepiece.model''') _A : Tuple ={'''target_lang''': '''fi''', '''source_lang''': '''en'''} _A : int ='''>>zh<<''' _A : Dict ='''Helsinki-NLP/''' if is_torch_available(): _A : List[Any] ='''pt''' elif is_tf_available(): _A : Optional[int] ='''tf''' else: _A : Dict ='''jax''' @require_sentencepiece class _lowercase ( _lowercase , unittest.TestCase ): a = MarianTokenizer a = False a = True def lowerCamelCase_ ( self: List[str] ): super().setUp() lowerCamelCase__ : List[Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""] lowerCamelCase__ : Optional[Any] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) ) lowerCamelCase__ : Optional[int] = Path(self.tmpdirname ) save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] ) save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] ) copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] ) lowerCamelCase__ : Dict = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase_ ( self: Optional[Any] , **UpperCamelCase__: Any ): return MarianTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def lowerCamelCase_ ( self: str , 
UpperCamelCase__: List[str] ): return ( "This is a test", "This is a test", ) def lowerCamelCase_ ( self: Optional[Any] ): lowerCamelCase__ : Any = """</s>""" lowerCamelCase__ : List[Any] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ ) def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : int = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """</s>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """<pad>""" ) self.assertEqual(len(UpperCamelCase__ ) , 9 ) def lowerCamelCase_ ( self: int ): self.assertEqual(self.get_tokenizer().vocab_size , 9 ) def lowerCamelCase_ ( self: int ): lowerCamelCase__ : List[Any] = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' ) lowerCamelCase__ : Optional[int] = en_de_tokenizer(["""I am a small frog"""] , return_tensors=UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : Optional[Any] = [38, 121, 14, 697, 38_848, 0] self.assertListEqual(UpperCamelCase__ , batch.input_ids[0] ) lowerCamelCase__ : List[str] = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(UpperCamelCase__ ) lowerCamelCase__ : Tuple = [x.name for x in Path(UpperCamelCase__ ).glob("""*""" )] self.assertIn("""source.spm""" , UpperCamelCase__ ) MarianTokenizer.from_pretrained(UpperCamelCase__ ) def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ : List[Any] = self.get_tokenizer() lowerCamelCase__ : Any = tok( ["""I am a small frog""" * 1_000, """I am a small frog"""] , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(batch.input_ids.shape , (2, 512) ) def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : str = self.get_tokenizer() lowerCamelCase__ : 
Dict = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(batch_smaller.input_ids.shape , (2, 10) ) @slow def lowerCamelCase_ ( self: List[str] ): # fmt: off lowerCamelCase__ : int = {"""input_ids""": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 
58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , ) def lowerCamelCase_ ( self: List[str] ): lowerCamelCase__ : Union[str, Any] = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" ) lowerCamelCase__ : str = """Tämä on testi""" lowerCamelCase__ : Any = """This is a test""" lowerCamelCase__ : int = [76, 7, 2_047, 2] lowerCamelCase__ : List[str] = [69, 12, 11, 940, 2] 
lowerCamelCase__ : Tuple = tokenizer(UpperCamelCase__ ).input_ids self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = tokenizer(text_target=UpperCamelCase__ ).input_ids self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : Tuple = tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
631
# NOTE(review): Extraction-collapsed chunk of a MobileViTV2 model test
# file: a config tester asserting the `width_multiplier` attribute, a
# model tester that builds pixel_values/labels fixtures and exercises the
# base model, image-classification head and semantic-segmentation head
# (checking output shapes against image_size // output_stride), and the
# start of the common ModelTesterMixin test case (skipped-test stubs,
# forward-signature check). The chunk is truncated mid-method at the end
# of the visible source.
# The original newlines were lost in extraction, so the lines below are
# not valid Python as-is; code tokens are preserved byte-identical below —
# do not edit without recovering the pristine file.
'''simple docstring''' import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class _lowercase ( _lowercase ): def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : Dict = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(UpperCamelCase__ , """width_multiplier""" ) ) class _lowercase : def __init__( self: str , UpperCamelCase__: Optional[int] , UpperCamelCase__: str=13 , UpperCamelCase__: Any=64 , UpperCamelCase__: Optional[Any]=2 , UpperCamelCase__: str=3 , UpperCamelCase__: List[str]="swish" , UpperCamelCase__: Any=3 , UpperCamelCase__: Optional[int]=32 , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: int=0.02 , UpperCamelCase__: Dict=True , UpperCamelCase__: Dict=True , UpperCamelCase__: Any=10 , UpperCamelCase__: int=None , UpperCamelCase__: List[Any]=0.25 , UpperCamelCase__: str=0.0 , UpperCamelCase__: Optional[int]=0.0 , ): lowerCamelCase__ : Any = parent lowerCamelCase__ : Optional[Any] = batch_size lowerCamelCase__ : Optional[int] = image_size lowerCamelCase__ : str = patch_size lowerCamelCase__ : Optional[int] = num_channels lowerCamelCase__ : Optional[Any] = make_divisible(512 * width_multiplier , divisor=8 ) lowerCamelCase__ : List[str] = hidden_act 
lowerCamelCase__ : Any = conv_kernel_size lowerCamelCase__ : Any = output_stride lowerCamelCase__ : Union[str, Any] = classifier_dropout_prob lowerCamelCase__ : List[str] = use_labels lowerCamelCase__ : Optional[Any] = is_training lowerCamelCase__ : List[str] = num_labels lowerCamelCase__ : Dict = initializer_range lowerCamelCase__ : List[Any] = scope lowerCamelCase__ : Tuple = width_multiplier lowerCamelCase__ : List[Any] = ffn_dropout lowerCamelCase__ : Any = attn_dropout def lowerCamelCase_ ( self: Dict ): lowerCamelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ : Tuple = None lowerCamelCase__ : Optional[Any] = None if self.use_labels: lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_labels ) lowerCamelCase__ : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) lowerCamelCase__ : Union[str, Any] = self.get_config() return config, pixel_values, labels, pixel_labels def lowerCamelCase_ ( self: List[Any] ): return MobileViTVaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , ) def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: int , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] ): lowerCamelCase__ : Union[str, Any] = MobileViTVaModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : str = model(UpperCamelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // 
self.output_stride, ) , ) def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Tuple ): lowerCamelCase__ : Tuple = self.num_labels lowerCamelCase__ : Dict = MobileViTVaForImageClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : int = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Any , UpperCamelCase__: Optional[Any] , UpperCamelCase__: str ): lowerCamelCase__ : List[str] = self.num_labels lowerCamelCase__ : Union[str, Any] = MobileViTVaForSemanticSegmentation(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : Tuple = model(UpperCamelCase__ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ : Any = self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = config_and_inputs lowerCamelCase__ : Optional[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _lowercase ( _lowercase , _lowercase , unittest.TestCase ): a = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) a = ( { """feature-extraction""": MobileViTVaModel, """image-classification""": MobileViTVaForImageClassification, """image-segmentation""": 
MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) a = False a = False a = False a = False def lowerCamelCase_ ( self: Optional[int] ): lowerCamelCase__ : Tuple = MobileViTVaModelTester(self ) lowerCamelCase__ : List[str] = MobileViTVaConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ ) def lowerCamelCase_ ( self: Tuple ): self.config_tester.run_common_tests() @unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" ) def lowerCamelCase_ ( self: int ): pass @unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" ) def lowerCamelCase_ ( self: List[str] ): pass @unittest.skip(reason="""MobileViTV2 does not output attentions""" ) def lowerCamelCase_ ( self: Union[str, Any] ): pass @require_torch_multi_gpu @unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" ) def lowerCamelCase_ ( self: int ): pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowerCamelCase_ ( self: Tuple ): pass def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ ) lowerCamelCase__ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ : Tuple = [*signature.parameters.keys()] lowerCamelCase__ : str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def lowerCamelCase_ ( self: str ): lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def lowerCamelCase_ ( self: List[str] ): def check_hidden_states_output(UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any] ): lowerCamelCase__ : 
List[Any] = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): lowerCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) lowerCamelCase__ : Optional[int] = outputs.hidden_states lowerCamelCase__ : List[Any] = 5 self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. lowerCamelCase__ : int = 2 for i in range(len(UpperCamelCase__ ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) lowerCamelCase__ , lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : int = True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase__ : str = True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Dict ): lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase__ ) @slow def lowerCamelCase_ ( self: Union[str, Any] ): for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : Union[str, Any] = MobileViTVaModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE_ () -> Optional[int]: lowerCamelCase__ : str = 
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class _lowercase ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self: Tuple ): return ( MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ) if is_vision_available() else None ) @slow def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : Optional[Any] = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to( UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = self.default_image_processor lowerCamelCase__ : List[Any] = prepare_img() lowerCamelCase__ : Any = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): lowerCamelCase__ : int = model(**UpperCamelCase__ ) # verify the logits lowerCamelCase__ : str = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) lowerCamelCase__ : Optional[int] = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) ) @slow def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : int = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) lowerCamelCase__ : Optional[Any] = model.to(UpperCamelCase__ ) lowerCamelCase__ : Any = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) lowerCamelCase__ : Union[str, Any] = prepare_img() lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): lowerCamelCase__ : Optional[Any] = model(**UpperCamelCase__ ) lowerCamelCase__ : str = outputs.logits # verify the logits lowerCamelCase__ : List[str] = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , UpperCamelCase__ ) lowerCamelCase__ : Any = 
torch.tensor( [ [[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]], [[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]], [[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]], ] , device=UpperCamelCase__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1e-4 ) ) @slow def lowerCamelCase_ ( self: Optional[int] ): lowerCamelCase__ : Optional[Any] = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) lowerCamelCase__ : List[Any] = model.to(UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) lowerCamelCase__ : Optional[Any] = prepare_img() lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): lowerCamelCase__ : Dict = model(**UpperCamelCase__ ) lowerCamelCase__ : List[str] = outputs.logits.detach().cpu() lowerCamelCase__ : List[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(50, 60)] ) lowerCamelCase__ : int = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , UpperCamelCase__ ) lowerCamelCase__ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ ) lowerCamelCase__ : int = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
631
1
"""A LIFO stack implemented on top of a singly linked list."""

from __future__ import annotations

from collections.abc import Iterator
from typing import Generic, TypeVar

T = TypeVar("T")


class Node(Generic[T]):
    """One element of the linked list backing the stack."""

    def __init__(self, data: T) -> None:
        self.data = data  # payload stored in this node
        self.next: Node[T] | None = None  # link toward the bottom of the stack

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """
    Linked-list based stack; ``push``, ``pop`` and ``peek`` are O(1).

    >>> stack = LinkedStack()
    >>> stack.is_empty()
    True
    >>> stack.push(5)
    >>> stack.push(9)
    >>> stack.push('python')
    >>> stack.is_empty()
    False
    >>> str(stack)
    'python->9->5'
    >>> stack.pop()
    'python'
    >>> stack.peek()
    9
    >>> stack.clear()
    >>> stack.is_empty()
    True
    """

    def __init__(self) -> None:
        self.top: Node[T] | None = None  # most recently pushed node

    def __iter__(self) -> Iterator[T]:
        # Iterate from the top of the stack toward the bottom.
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        """Return True when the stack holds no elements."""
        return self.top is None

    def push(self, item: T) -> None:
        """Place *item* on top of the stack."""
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        """Remove and return the top element.

        Raises:
            IndexError: if the stack is empty.
        """
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        """Return the top element without removing it.

        Raises:
            IndexError: if the stack is empty.
        """
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        """Drop every element; the nodes become garbage-collectable."""
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
631
"""Tokenization classes for GPT-NeoX (fast, backed by HuggingFace `tokenizers`)."""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2_048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    """
    Fast GPT-NeoX-20B tokenizer (byte-level BPE).

    Args:
        vocab_file (`str`, *optional*): Path to the vocabulary file.
        merges_file (`str`, *optional*): Path to the merges file.
        tokenizer_file (`str`, *optional*): Path to a serialized `tokenizers` file.
        unk_token / bos_token / eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            Special tokens; GPT-NeoX uses the same token for all three.
        add_prefix_space (`bool`, *optional*, defaults to `False`):
            Whether to add an initial space to the input (treats the leading word
            like any other word).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        # Rebuild the backend pre-tokenizer when its serialized `add_prefix_space`
        # disagrees with the value requested here.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer's model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Flatten a `Conversation` into input ids, truncating from the left if too long."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        # Keep only the most recent tokens when exceeding the model's max length.
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
631
1
"""Convert DETA checkpoints (Swin-large backbone) from the original repository to HF format.

URL: https://github.com/jozhang97/DETA
"""

import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image

from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# (original suffix -> HF suffix) renames shared by every Swin transformer block
_SWIN_BLOCK_RENAMES = [
    ("norm1.weight", "layernorm_before.weight"),
    ("norm1.bias", "layernorm_before.bias"),
    ("attn.relative_position_bias_table", "attention.self.relative_position_bias_table"),
    ("attn.relative_position_index", "attention.self.relative_position_index"),
    ("attn.proj.weight", "attention.output.dense.weight"),
    ("attn.proj.bias", "attention.output.dense.bias"),
    ("norm2.weight", "layernorm_after.weight"),
    ("norm2.bias", "layernorm_after.bias"),
    ("mlp.fc1.weight", "intermediate.dense.weight"),
    ("mlp.fc1.bias", "intermediate.dense.bias"),
    ("mlp.fc2.weight", "output.dense.weight"),
    ("mlp.fc2.bias", "output.dense.bias"),
]

# deformable-attention parameter names that keep the same suffix on both sides
_DEFORMABLE_ATTN_PARAMS = [
    "sampling_offsets.weight",
    "sampling_offsets.bias",
    "attention_weights.weight",
    "attention_weights.bias",
    "value_proj.weight",
    "value_proj.bias",
    "output_proj.weight",
    "output_proj.bias",
]


def get_deta_config(model_name):
    """Build a DetaConfig (Swin-large backbone) and attach id2label for COCO or Objects365."""
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )

    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    idalabel = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}

    return config


def create_rename_keys(config):
    """Return a list of (old_key, new_key) pairs mapping original DETA names to HF names."""
    rename_keys = []

    # stem
    rename_keys.append(
        ("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight")
    )
    rename_keys.append(
        ("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias")
    )
    rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight"))
    rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias"))

    # Swin stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            old_prefix = f"backbone.0.body.layers.{i}.blocks.{j}"
            new_prefix = f"model.backbone.model.encoder.layers.{i}.blocks.{j}"
            for old_suffix, new_suffix in _SWIN_BLOCK_RENAMES:
                rename_keys.append((f"{old_prefix}.{old_suffix}", f"{new_prefix}.{new_suffix}"))
        # the last stage has no patch-merging downsample layer
        if i < 3:
            for suffix in ("downsample.reduction.weight", "downsample.norm.weight", "downsample.norm.bias"):
                rename_keys.append(
                    (f"backbone.0.body.layers.{i}.{suffix}", f"model.backbone.model.encoder.layers.{i}.{suffix}")
                )

    # stage output norms feed hidden_states_norms for stages 2/3/4
    for norm_idx, stage in ((1, "stage2"), (2, "stage3"), (3, "stage4")):
        rename_keys.append(
            (f"backbone.0.body.norm{norm_idx}.weight", f"model.backbone.model.hidden_states_norms.{stage}.weight")
        )
        rename_keys.append(
            (f"backbone.0.body.norm{norm_idx}.bias", f"model.backbone.model.hidden_states_norms.{stage}.bias")
        )

    # transformer encoder
    for i in range(config.encoder_layers):
        old_prefix = f"transformer.encoder.layers.{i}"
        new_prefix = f"model.encoder.layers.{i}"
        for param in _DEFORMABLE_ATTN_PARAMS:
            rename_keys.append((f"{old_prefix}.self_attn.{param}", f"{new_prefix}.self_attn.{param}"))
        for old_suffix, new_suffix in (
            ("norm1.weight", "self_attn_layer_norm.weight"),
            ("norm1.bias", "self_attn_layer_norm.bias"),
            ("linear1.weight", "fc1.weight"),
            ("linear1.bias", "fc1.bias"),
            ("linear2.weight", "fc2.weight"),
            ("linear2.bias", "fc2.bias"),
            ("norm2.weight", "final_layer_norm.weight"),
            ("norm2.bias", "final_layer_norm.bias"),
        ):
            rename_keys.append((f"{old_prefix}.{old_suffix}", f"{new_prefix}.{new_suffix}"))

    # transformer decoder (cross attention maps to encoder_attn)
    for i in range(config.decoder_layers):
        old_prefix = f"transformer.decoder.layers.{i}"
        new_prefix = f"model.decoder.layers.{i}"
        for param in _DEFORMABLE_ATTN_PARAMS:
            rename_keys.append((f"{old_prefix}.cross_attn.{param}", f"{new_prefix}.encoder_attn.{param}"))
        for old_suffix, new_suffix in (
            ("norm1.weight", "encoder_attn_layer_norm.weight"),
            ("norm1.bias", "encoder_attn_layer_norm.bias"),
            ("self_attn.out_proj.weight", "self_attn.out_proj.weight"),
            ("self_attn.out_proj.bias", "self_attn.out_proj.bias"),
            ("norm2.weight", "self_attn_layer_norm.weight"),
            ("norm2.bias", "self_attn_layer_norm.bias"),
            ("linear1.weight", "fc1.weight"),
            ("linear1.bias", "fc1.bias"),
            ("linear2.weight", "fc2.weight"),
            ("linear2.bias", "fc2.bias"),
            ("norm3.weight", "final_layer_norm.weight"),
            ("norm3.bias", "final_layer_norm.bias"),
        ):
            rename_keys.append((f"{old_prefix}.{old_suffix}", f"{new_prefix}.{new_suffix}"))

    return rename_keys


def rename_key(dct, old, new):
    """Move the entry at key *old* to key *new* in-place."""
    val = dct.pop(old)
    dct[new] = val


def read_in_swin_q_k_v(state_dict, backbone_config):
    """Split each fused Swin attention `qkv` projection into separate query/key/value tensors."""
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # read in weights + bias of input projection layer
            # (in the original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            prefix = f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self"
            state_dict[f"{prefix}.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"{prefix}.query.bias"] = in_proj_bias[:dim]
            state_dict[f"{prefix}.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"{prefix}.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"{prefix}.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"{prefix}.value.bias"] = in_proj_bias[-dim:]


def read_in_decoder_q_k_v(state_dict, config):
    """Split the decoder self-attention fused in_proj into q/k/v projections."""
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        prefix = f"model.decoder.layers.{i}.self_attn"
        state_dict[f"{prefix}.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"{prefix}.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"{prefix}.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"{prefix}.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"{prefix}.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"{prefix}.v_proj.bias"] = in_proj_bias[-hidden_size:]


def prepare_img():
    """Download the standard COCO cats image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """
    Copy/paste/tweak the original DETA weights into the HF DetaForObjectDetection layout,
    verify the outputs on a test image, and optionally save/push the result.
    """
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        type=str,
        default="deta-swin-large",
        choices=["deta-swin-large", "deta-swin-large-o365"],
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        help="Path to the folder to output PyTorch model.",
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
631
"""Lazy import structure for the GIT model (configuration, processing and — when torch
is available — the modeling classes)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: the modeling symbols are simply not registered
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers; runtime uses the lazy module below.
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
631
1
"""Accelerate + DeepSpeed example: fine-tune BERT on GLUE/MRPC with per-epoch
checkpointing, and verify that optimizer/scheduler/model state round-trips
through ``accelerator.save_state`` / ``accelerator.load_state``.

Bug fix: the mangled original assigned every value to throwaway names while
subsequent statements referenced the real names (``tokenizer``, ``datasets``,
``samples_seen``, ``overall_step``, ...), so the script raised NameError.
Descriptive names are restored throughout.
"""
import argparse
import json
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator, batch_size=16, model_name_or_path="bert-base-cased"):
    """Build tokenized GLUE/MRPC train and validation dataloaders.

    Args:
        accelerator: the ``Accelerator`` (used to pick TPU-friendly padding).
        batch_size: per-device train batch size.
        model_name_or_path: tokenizer checkpoint to load.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["idx", "sentence1", "sentence2"],
        load_from_cache_file=False,
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders. NOTE(review): the original flags were mangled;
    # shuffle=True for train / False for eval and EVAL_BATCH_SIZE for the eval
    # loader follow the canonical accelerate example — confirm against callers.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader


def evaluation_loop(accelerator, model, eval_dataloader, metric):
    """Run evaluation and return the accuracy, deduplicating the padded
    samples the distributed sampler adds to the final batch."""
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather((predictions, batch["labels"]))
        # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(predictions=predictions, references=references)
    eval_metric = metric.compute()
    return eval_metric["accuracy"]


def training_function(config, args):
    """Train for ``config['num_epochs']`` epochs, checkpointing each epoch; or,
    when resuming, validate that the restored state matches the saved JSON."""
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer: a real AdamW unless the DeepSpeed config supplies one.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler: real schedule unless DeepSpeed provides one.
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        # Extract the epoch number from a path like ".../epoch_3"
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)


def main():
    """Parse CLI arguments and launch training."""
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
631
'''simple docstring''' import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin _A : int =get_tests_dir('''fixtures/test_sentencepiece.model''') _A : Tuple ={'''target_lang''': '''fi''', '''source_lang''': '''en'''} _A : int ='''>>zh<<''' _A : Dict ='''Helsinki-NLP/''' if is_torch_available(): _A : List[Any] ='''pt''' elif is_tf_available(): _A : Optional[int] ='''tf''' else: _A : Dict ='''jax''' @require_sentencepiece class _lowercase ( _lowercase , unittest.TestCase ): a = MarianTokenizer a = False a = True def lowerCamelCase_ ( self: List[str] ): super().setUp() lowerCamelCase__ : List[Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""] lowerCamelCase__ : Optional[Any] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) ) lowerCamelCase__ : Optional[int] = Path(self.tmpdirname ) save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] ) save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] ) copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] ) lowerCamelCase__ : Dict = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase_ ( self: Optional[Any] , **UpperCamelCase__: Any ): return MarianTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def lowerCamelCase_ ( self: str , 
UpperCamelCase__: List[str] ): return ( "This is a test", "This is a test", ) def lowerCamelCase_ ( self: Optional[Any] ): lowerCamelCase__ : Any = """</s>""" lowerCamelCase__ : List[Any] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ ) def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : int = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """</s>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """<pad>""" ) self.assertEqual(len(UpperCamelCase__ ) , 9 ) def lowerCamelCase_ ( self: int ): self.assertEqual(self.get_tokenizer().vocab_size , 9 ) def lowerCamelCase_ ( self: int ): lowerCamelCase__ : List[Any] = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' ) lowerCamelCase__ : Optional[int] = en_de_tokenizer(["""I am a small frog"""] , return_tensors=UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : Optional[Any] = [38, 121, 14, 697, 38_848, 0] self.assertListEqual(UpperCamelCase__ , batch.input_ids[0] ) lowerCamelCase__ : List[str] = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(UpperCamelCase__ ) lowerCamelCase__ : Tuple = [x.name for x in Path(UpperCamelCase__ ).glob("""*""" )] self.assertIn("""source.spm""" , UpperCamelCase__ ) MarianTokenizer.from_pretrained(UpperCamelCase__ ) def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ : List[Any] = self.get_tokenizer() lowerCamelCase__ : Any = tok( ["""I am a small frog""" * 1_000, """I am a small frog"""] , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(batch.input_ids.shape , (2, 512) ) def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : str = self.get_tokenizer() lowerCamelCase__ : 
Dict = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(batch_smaller.input_ids.shape , (2, 10) ) @slow def lowerCamelCase_ ( self: List[str] ): # fmt: off lowerCamelCase__ : int = {"""input_ids""": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 
58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , ) def lowerCamelCase_ ( self: List[str] ): lowerCamelCase__ : Union[str, Any] = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" ) lowerCamelCase__ : str = """Tämä on testi""" lowerCamelCase__ : Any = """This is a test""" lowerCamelCase__ : int = [76, 7, 2_047, 2] lowerCamelCase__ : List[str] = [69, 12, 11, 940, 2] 
lowerCamelCase__ : Tuple = tokenizer(UpperCamelCase__ ).input_ids self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = tokenizer(text_target=UpperCamelCase__ ).input_ids self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : Tuple = tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
631
1
"""Random graph generation utilities.

Bug fix: the mangled original defined both functions under one name (the
second definition shadowed the first) and referenced an undefined
``complete_graph``; the distinct names are restored.
"""
import random


def random_graph(vertices_number, probability, directed=False):
    """Generate a random graph over ``vertices_number`` vertices.

    Each candidate edge (i, j), i < j, is included independently with the
    given ``probability``. Probabilities >= 1 yield a complete graph and
    probabilities <= 0 yield an edgeless graph.

    Args:
        vertices_number: number of vertices (labeled 0 .. n-1).
        probability: per-edge inclusion probability.
        directed: if False, every edge is mirrored (adjacency is symmetric).

    Returns:
        Adjacency-list dict mapping each vertex to its neighbors.
    """
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is lower than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i, either
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number):
    """Return the complete graph on ``vertices_number`` vertices as an
    adjacency-list dict (every vertex adjacent to every other)."""
    return {i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)}


if __name__ == "__main__":
    import doctest

    doctest.testmod()
631
"""RWKV model configuration.

Bug fix: the mangled original declared every ``__init__`` parameter with the
same name (a SyntaxError), inherited from an undefined ``_lowercase`` base,
and bound both class attributes to ``a``. Canonical names are restored.
"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class RwkvConfig(PretrainedConfig):
    """Configuration for an RWKV model.

    Stores the architecture hyper-parameters; defaults match the 14B-style
    checkpoints listed above. ``max_position_embeddings`` is aliased to
    ``context_length`` via ``attribute_map``.
    """

    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50_277,
        context_length=1_024,
        hidden_size=4_096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Both derived sizes fall back to functions of hidden_size when unset.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
631
1
"""Public exports for the Stable Diffusion ControlNet pipelines.

The PyTorch pipelines are exported only when both ``transformers`` and
``torch`` are installed; otherwise the dummy placeholder objects (which raise
a helpful error on use) are exposed instead. The Flax pipeline additionally
requires ``flax``.
"""
from ...utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_torch_available,
    is_transformers_available,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Missing torch/transformers: surface importable dummies in their place.
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline


if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
631
"""RoCBert model configuration.

Bug fix: the mangled original declared every ``__init__`` parameter with the
same name (a SyntaxError) and inherited from an undefined ``_lowercase``
base. Canonical names are restored; defaults are unchanged.
"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    """Configuration for a RoCBert model.

    Extends the standard BERT hyper-parameters with RoCBert's extra
    pronunciation and shape embedding tables (used for robustness to
    adversarial Chinese character substitutions).
    """

    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        # RoCBert-specific: toggles and dimensions for the auxiliary
        # pronunciation/shape embeddings.
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
631
1
"""Utilities for reading/writing the YAML metadata block of a dataset README.

Bug fix: the mangled original assigned intermediate values to throwaway names
while later statements referenced the real names (``counter``,
``duplicate_keys``, ``yamlblock``, ``sep_idx``, ``content``, ...), raising
NameError; and the CLI block mixed ``ap``/``args`` bindings. Working names
are restored throughout.
"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple

import yaml


class _NoDuplicateSafeLoader(yaml.SafeLoader):
    """SafeLoader that rejects mappings containing duplicate keys."""

    def _check_no_duplicates_on_constructed_node(self, node):
        # Collect the already-constructed key objects for this mapping node;
        # lists are converted to tuples so they can be counted (hashable).
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    """Split a README into (yaml_block, rest).

    The YAML block is the text between a leading ``---`` line and the next
    ``---`` line; returns ``(None, full_text)`` when there is no such block.
    """
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    """Dict subclass holding dataset metadata, serializable to/from the YAML
    header of a README.md file."""

    # Fields spelled with dashes in YAML (train-eval-index) but underscores
    # as Python identifiers.
    _FIELDS_WITH_DASHES = {"train_eval_index"}

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Parse the metadata block of the README at *path* (empty metadata
        when the README has no YAML block)."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        """Write this metadata back into the README at *path*, preserving the
        non-YAML content (creates the file when missing)."""
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        full_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(full_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        # Re-attach the (updated) YAML header to the README body.
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Construct from a YAML string; duplicate keys raise ``TypeError``."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        """Serialize to YAML, converting dashed fields back to their YAML
        spelling; insertion order is preserved (sort_keys=False)."""
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")


# Known task tags; each maps to its (currently empty) list of sub-task ids.
known_task_ids = {
    "image-classification": [],
    "translation": [],
    "image-segmentation": [],
    "fill-mask": [],
    "automatic-speech-recognition": [],
    "token-classification": [],
    "sentence-similarity": [],
    "audio-classification": [],
    "question-answering": [],
    "summarization": [],
    "zero-shot-classification": [],
    "table-to-text": [],
    "feature-extraction": [],
    "other": [],
    "multiple-choice": [],
    "text-classification": [],
    "text-to-image": [],
    "text2text-generation": [],
    "zero-shot-image-classification": [],
    "tabular-classification": [],
    "tabular-regression": [],
    "image-to-image": [],
    "tabular-to-text": [],
    "unconditional-image-generation": [],
    "text-retrieval": [],
    "text-to-speech": [],
    "object-detection": [],
    "audio-to-audio": [],
    "text-generation": [],
    "conversational": [],
    "table-question-answering": [],
    "visual-question-answering": [],
    "image-to-text": [],
    "reinforcement-learning": [],
    "voice-activity-detection": [],
    "time-series-forecasting": [],
    "document-question-answering": [],
}

if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
631
"""Draw a Sierpinski triangle fractal with turtle graphics.

Usage: ``python fractals.py <int:depth_for_fractal>``

Bug fixes versus the mangled original: ``get_mid`` averaged a point with
itself (both parameters were mangled to the same name), and both functions
shared one name so the recursive calls referenced undefined symbols.
"""
import sys


def get_mid(point_a, point_b):
    """Return the midpoint of the segment point_a -> point_b as a
    (x, y) tuple of floats."""
    return (point_a[0] + point_b[0]) / 2, (point_a[1] + point_b[1]) / 2


def triangle(vertex1, vertex2, vertex3, depth) -> None:
    """Draw the triangle given by the three vertices, then recurse into the
    three corner sub-triangles until *depth* reaches 0."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    # Imported lazily so get_mid/triangle stay importable without a display.
    import turtle

    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")

    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
631
1
"""Tests for ``Dataset.from_list``.

Bug fix: the mangled original gave every test method the same name, so only
the last definition survived class creation, and method bodies referenced
names (``dset``, ``example_records``, ...) that were never bound. Distinct
descriptive names are restored.
"""
from unittest import TestCase

from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset


class DatasetListTest(TestCase):
    def _create_example_records(self):
        # Four homogeneous records sharing the same two columns.
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        # Column-oriented equivalent of _create_example_records.
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        example_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(example_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        example_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(example_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
631
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class _lowercase : def __init__( self: int , UpperCamelCase__: Dict , UpperCamelCase__: List[str]=13 , UpperCamelCase__: Union[str, Any]=7 , UpperCamelCase__: Union[str, Any]=True , UpperCamelCase__: List[Any]=True , UpperCamelCase__: Optional[Any]=True , UpperCamelCase__: int=True , UpperCamelCase__: List[Any]=99 , UpperCamelCase__: Tuple=32 , UpperCamelCase__: List[str]=2 , UpperCamelCase__: Optional[Any]=4 , UpperCamelCase__: Optional[int]=37 , UpperCamelCase__: Any="gelu" , UpperCamelCase__: Any=0.1 , UpperCamelCase__: int=0.1 , UpperCamelCase__: Optional[Any]=512 , UpperCamelCase__: List[str]=16 , UpperCamelCase__: Optional[int]=2 , UpperCamelCase__: Dict=0.02 , UpperCamelCase__: Tuple=3 , UpperCamelCase__: Optional[int]=4 , UpperCamelCase__: Union[str, Any]=None , ): lowerCamelCase__ : Dict = parent lowerCamelCase__ : Union[str, Any] = 13 lowerCamelCase__ : Any = 7 lowerCamelCase__ : int = True lowerCamelCase__ : Optional[Any] = True lowerCamelCase__ : Dict = True lowerCamelCase__ : List[str] = True lowerCamelCase__ : str = 99 lowerCamelCase__ : Dict = 384 lowerCamelCase__ : Optional[Any] = 2 lowerCamelCase__ : Optional[int] = 4 lowerCamelCase__ : Optional[Any] = 37 lowerCamelCase__ : Union[str, Any] = """gelu""" lowerCamelCase__ : int = 0.1 lowerCamelCase__ : Optional[Any] = 0.1 lowerCamelCase__ : List[Any] = 
512 lowerCamelCase__ : Optional[Any] = 16 lowerCamelCase__ : Any = 2 lowerCamelCase__ : Optional[Any] = 0.02 lowerCamelCase__ : int = 3 lowerCamelCase__ : List[str] = 4 lowerCamelCase__ : Any = 128 lowerCamelCase__ : List[Any] = 2 lowerCamelCase__ : Optional[Any] = 9 lowerCamelCase__ : Any = 1 lowerCamelCase__ : Optional[int] = None def lowerCamelCase_ ( self: List[Any] ): lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ : str = None if self.use_input_mask: lowerCamelCase__ : int = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase__ : List[str] = None if self.use_token_type_ids: lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase__ : int = None lowerCamelCase__ : Optional[Any] = None lowerCamelCase__ : Optional[Any] = None if self.use_labels: lowerCamelCase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase__ : Any = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ : List[Any] = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCamelCase__ , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: Dict , UpperCamelCase__: List[str] , UpperCamelCase__: Any , UpperCamelCase__: Any , UpperCamelCase__: Any , 
UpperCamelCase__: str , UpperCamelCase__: Any ): lowerCamelCase__ : List[Any] = TFConvBertModel(config=UpperCamelCase__ ) lowerCamelCase__ : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} lowerCamelCase__ : List[str] = [input_ids, input_mask] lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ ) lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase_ ( self: Any , UpperCamelCase__: Tuple , UpperCamelCase__: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple ): lowerCamelCase__ : int = TFConvBertForMaskedLM(config=UpperCamelCase__ ) lowerCamelCase__ : Tuple = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } lowerCamelCase__ : int = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase_ ( self: str , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Any , UpperCamelCase__: Any , UpperCamelCase__: Union[str, Any] ): lowerCamelCase__ : int = self.num_labels lowerCamelCase__ : Dict = TFConvBertForSequenceClassification(config=UpperCamelCase__ ) lowerCamelCase__ : Dict = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[str] , UpperCamelCase__: int , UpperCamelCase__: List[str] , 
UpperCamelCase__: Dict ): lowerCamelCase__ : Optional[int] = self.num_choices lowerCamelCase__ : Dict = TFConvBertForMultipleChoice(config=UpperCamelCase__ ) lowerCamelCase__ : int = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) ) lowerCamelCase__ : List[str] = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) ) lowerCamelCase__ : Any = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) ) lowerCamelCase__ : Tuple = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase_ ( self: str , UpperCamelCase__: Any , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: Any , UpperCamelCase__: Optional[int] , UpperCamelCase__: str , UpperCamelCase__: int ): lowerCamelCase__ : List[Any] = self.num_labels lowerCamelCase__ : List[str] = TFConvBertForTokenClassification(config=UpperCamelCase__ ) lowerCamelCase__ : Optional[Any] = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } lowerCamelCase__ : Tuple = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase_ ( self: Any , UpperCamelCase__: List[Any] , UpperCamelCase__: str , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any] ): lowerCamelCase__ : Optional[int] = TFConvBertForQuestionAnswering(config=UpperCamelCase__ ) lowerCamelCase__ : int = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ ) 
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase_ ( self: Optional[Any] ): lowerCamelCase__ : str = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) : str = config_and_inputs lowerCamelCase__ : Optional[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class _lowercase ( _lowercase , _lowercase , unittest.TestCase ): a = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) a = ( { """feature-extraction""": TFConvBertModel, """fill-mask""": TFConvBertForMaskedLM, """question-answering""": TFConvBertForQuestionAnswering, """text-classification""": TFConvBertForSequenceClassification, """token-classification""": TFConvBertForTokenClassification, """zero-shot""": TFConvBertForSequenceClassification, } if is_tf_available() else {} ) a = False a = False a = False def lowerCamelCase_ ( self: str ): lowerCamelCase__ : Dict = TFConvBertModelTester(self ) lowerCamelCase__ : Dict = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 ) def lowerCamelCase_ ( self: List[str] ): self.config_tester.run_common_tests() def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ : 
int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ ) def lowerCamelCase_ ( self: str ): lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ ) @slow def lowerCamelCase_ ( self: Optional[Any] ): lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ : Dict = True lowerCamelCase__ : Tuple = True if hasattr(UpperCamelCase__ , """use_cache""" ): lowerCamelCase__ : Union[str, Any] = True lowerCamelCase__ : List[str] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length ) lowerCamelCase__ : Tuple = getattr(self.model_tester , """key_length""" , UpperCamelCase__ ) for model_class in self.all_model_classes: lowerCamelCase__ : int = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ ) lowerCamelCase__ : Dict = len(model(UpperCamelCase__ ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase__ , saved_model=UpperCamelCase__ ) lowerCamelCase__ : int = os.path.join(UpperCamelCase__ , """saved_model""" , """1""" ) lowerCamelCase__ : List[Any] = tf.keras.models.load_model(UpperCamelCase__ ) lowerCamelCase__ : Any = model(UpperCamelCase__ ) if self.is_encoder_decoder: lowerCamelCase__ : Dict = outputs["""encoder_hidden_states"""] lowerCamelCase__ : Any = outputs["""encoder_attentions"""] else: lowerCamelCase__ : int = 
outputs["""hidden_states"""] lowerCamelCase__ : Optional[int] = outputs["""attentions"""] self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = getattr( self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def lowerCamelCase_ ( self: int ): lowerCamelCase__ : Union[str, Any] = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" ) self.assertIsNotNone(UpperCamelCase__ ) def lowerCamelCase_ ( self: List[str] ): lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ : Union[str, Any] = True lowerCamelCase__ : int = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length ) lowerCamelCase__ : Any = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length ) lowerCamelCase__ : Optional[int] = getattr(self.model_tester , """key_length""" , UpperCamelCase__ ) lowerCamelCase__ : List[Any] = getattr(self.model_tester , """key_length""" , UpperCamelCase__ ) def check_decoder_attentions_output(UpperCamelCase__: Union[str, Any] ): lowerCamelCase__ : List[Any] = len(UpperCamelCase__ ) self.assertEqual(out_len % 2 , 0 ) lowerCamelCase__ : Any = outputs.decoder_attentions self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def 
check_encoder_attentions_output(UpperCamelCase__: List[str] ): lowerCamelCase__ : Any = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: lowerCamelCase__ : int = True lowerCamelCase__ : Any = False lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ ) lowerCamelCase__ : List[str] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) lowerCamelCase__ : Optional[int] = len(UpperCamelCase__ ) self.assertEqual(config.output_hidden_states , UpperCamelCase__ ) check_encoder_attentions_output(UpperCamelCase__ ) if self.is_encoder_decoder: lowerCamelCase__ : str = model_class(UpperCamelCase__ ) lowerCamelCase__ : Tuple = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) self.assertEqual(config.output_hidden_states , UpperCamelCase__ ) check_decoder_attentions_output(UpperCamelCase__ ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] lowerCamelCase__ : Optional[int] = True lowerCamelCase__ : Dict = model_class(UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) self.assertEqual(config.output_hidden_states , UpperCamelCase__ ) check_encoder_attentions_output(UpperCamelCase__ ) # Check attention is always last and order is fine lowerCamelCase__ : List[Any] = True lowerCamelCase__ : int = True lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ ) lowerCamelCase__ : List[Any] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCamelCase__ ) ) 
self.assertEqual(model.config.output_hidden_states , UpperCamelCase__ ) check_encoder_attentions_output(UpperCamelCase__ ) @require_tf class _lowercase ( unittest.TestCase ): @slow def lowerCamelCase_ ( self: List[str] ): lowerCamelCase__ : Dict = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" ) lowerCamelCase__ : Optional[Any] = tf.constant([[0, 1, 2, 3, 4, 5]] ) lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ )[0] lowerCamelCase__ : Dict = [1, 6, 768] self.assertEqual(output.shape , UpperCamelCase__ ) lowerCamelCase__ : Dict = tf.constant( [ [ [-0.03_475_493, -0.4_686_034, -0.30_638_832], [0.22_637_248, -0.26_988_646, -0.7_423_424], [0.10_324_868, -0.45_013_508, -0.58_280_784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , UpperCamelCase__ , atol=1e-4 )
631
1
'''simple docstring''' import argparse import fairseq import torch from torch import nn from transformers import ( MBartaaTokenizer, MBartConfig, MBartForCausalLM, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() _A : Union[str, Any] =logging.get_logger(__name__) _A : List[Any] ={ '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } _A : List[str] =[ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict: for attribute in key.split(""".""" ): lowerCamelCase__ : List[Any] = getattr(UpperCamelCase , UpperCamelCase ) if weight_type is not None: lowerCamelCase__ : Tuple = getattr(UpperCamelCase , UpperCamelCase ).shape else: lowerCamelCase__ : List[str] = hf_pointer.shape assert hf_shape == value.shape, ( f'''Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": lowerCamelCase__ : List[str] = value elif weight_type == "weight_g": lowerCamelCase__ : List[Any] = value elif weight_type == "weight_v": lowerCamelCase__ : List[str] = value elif weight_type == "bias": lowerCamelCase__ : Optional[Any] = value else: lowerCamelCase__ : Optional[Any] = value logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Optional[Any]: lowerCamelCase__ : List[str] = [] lowerCamelCase__ : List[str] = fairseq_model.state_dict() lowerCamelCase__ : Tuple = hf_model.feature_extractor lowerCamelCase__ : Dict = hf_model.adapter for name, value in fairseq_dict.items(): lowerCamelCase__ : Optional[int] = False if "conv_layers" in name: load_conv_layer( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , hf_model.config.feat_extract_norm == """group""" , ) lowerCamelCase__ : List[str] = True elif any(x in name for x in ["""adaptor""", """w2v_encoder.proj.""", """w2v_proj_ln."""] ): load_adapter(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) lowerCamelCase__ : str = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: lowerCamelCase__ : Dict = True if "*" in mapped_key: lowerCamelCase__ : Dict = name.split(UpperCamelCase )[0].split(""".""" )[-2] lowerCamelCase__ : Union[str, Any] = mapped_key.replace("""*""" , UpperCamelCase ) if "weight_g" in name: lowerCamelCase__ : List[Any] = """weight_g""" elif "weight_v" in name: lowerCamelCase__ : int = """weight_v""" elif "bias" in name: lowerCamelCase__ : Tuple = """bias""" elif "weight" in name: lowerCamelCase__ : Any = """weight""" else: lowerCamelCase__ : str = None set_recursively(UpperCamelCase , UpperCamelCase , 
UpperCamelCase , UpperCamelCase , UpperCamelCase ) continue if not is_used: unused_weights.append(UpperCamelCase ) logger.warning(f'''Unused weights: {unused_weights}''' ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str: lowerCamelCase__ : Optional[int] = full_name.split("""conv_layers.""" )[-1] lowerCamelCase__ : Optional[Any] = name.split(""".""" ) lowerCamelCase__ : Dict = int(items[0] ) lowerCamelCase__ : Dict = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) lowerCamelCase__ : Union[str, Any] = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) lowerCamelCase__ : Optional[Any] = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) lowerCamelCase__ : Any = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) lowerCamelCase__ : Tuple = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(UpperCamelCase ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[int]: lowerCamelCase__ : Any = full_name.split("""adaptor.""" )[-1] lowerCamelCase__ : Union[str, Any] = name.split(""".""" ) if items[1].isdigit(): lowerCamelCase__ : Union[str, Any] = int(items[1] ) else: lowerCamelCase__ : Optional[Any] = None if "adaptor" not in full_name: if "proj_ln" in full_name: # has to be layer norm if "bias" in name: assert ( value.shape == adapter.proj_layer_norm.bias.data.shape ), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.''' lowerCamelCase__ : Any = value logger.info(f'''Adapter proj layer norm bias was initialized from {full_name}.''' ) if "weight" in name: assert ( value.shape == adapter.proj_layer_norm.weight.data.shape ), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.''' lowerCamelCase__ : str = value else: # has to be projection layer if "bias" in name: assert ( value.shape == adapter.proj.bias.data.shape ), f'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.''' lowerCamelCase__ : List[Any] = value logger.info(f'''Adapter proj layer bias was initialized from {full_name}.''' ) if "weight" in name: assert ( value.shape == adapter.proj.weight.data.shape ), f'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} 
was found.''' lowerCamelCase__ : str = value logger.info(f'''Adapter proj layer weight was initialized from {full_name}.''' ) elif isinstance(UpperCamelCase , UpperCamelCase ): if "bias" in name: assert ( value.shape == adapter.layers[layer_id].conv.bias.data.shape ), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.''' lowerCamelCase__ : Union[str, Any] = value logger.info(f'''Adapter layer {layer_id} bias was initialized from {full_name}.''' ) elif "weight" in name: assert ( value.shape == adapter.layers[layer_id].conv.weight.data.shape ), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.''' lowerCamelCase__ : List[Any] = value logger.info(f'''Adapter layer {layer_id} bias was initialized from {full_name}.''' ) else: unused_weights.append(UpperCamelCase ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> List[Any]: lowerCamelCase__ , lowerCamelCase__ : List[str] = emb.weight.shape lowerCamelCase__ : Optional[Any] = nn.Linear(UpperCamelCase , UpperCamelCase , bias=UpperCamelCase ) lowerCamelCase__ : List[Any] = emb.weight.data return lin_layer @torch.no_grad() def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ) -> Any: lowerCamelCase__ : Union[str, Any] = WavaVecaConfig.from_pretrained( UpperCamelCase , add_adapter=UpperCamelCase , adapter_stride=UpperCamelCase , adapter_kernel_size=UpperCamelCase , use_auth_token=UpperCamelCase , output_hidden_size=UpperCamelCase , ) lowerCamelCase__ : Optional[Any] = MBartConfig.from_pretrained(UpperCamelCase ) # load model lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={ """config_yaml""": config_yaml_path, """data""": """/""".join(dict_path.split("""/""" 
)[:-1] ), """w2v_path""": checkpoint_path, """load_pretrained_decoder_from""": None, } , ) lowerCamelCase__ : Optional[Any] = model[0].eval() # load feature extractor lowerCamelCase__ : List[str] = WavaVecaFeatureExtractor.from_pretrained(UpperCamelCase , use_auth_token=UpperCamelCase ) # set weights for wav2vec2 encoder lowerCamelCase__ : Any = WavaVecaModel(UpperCamelCase ) recursively_load_weights_wavaveca(model.encoder , UpperCamelCase ) # load decoder weights lowerCamelCase__ : Optional[Any] = MBartForCausalLM(UpperCamelCase ) lowerCamelCase__ , lowerCamelCase__ : int = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=UpperCamelCase ) logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''' ) logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' ) lowerCamelCase__ : Union[str, Any] = SpeechEncoderDecoderModel(encoder=UpperCamelCase , decoder=UpperCamelCase ) lowerCamelCase__ : Tuple = False lowerCamelCase__ : int = MBartaaTokenizer(UpperCamelCase ) tokenizer.save_pretrained(UpperCamelCase ) lowerCamelCase__ : Optional[Any] = hf_wavavec.config.to_dict() lowerCamelCase__ : int = tokenizer.pad_token_id lowerCamelCase__ : Union[str, Any] = tokenizer.bos_token_id lowerCamelCase__ : Optional[int] = tokenizer.eos_token_id lowerCamelCase__ : List[str] = """mbart50""" lowerCamelCase__ : List[str] = """wav2vec2""" lowerCamelCase__ : Optional[int] = tokenizer.eos_token_id lowerCamelCase__ : Tuple = 250004 lowerCamelCase__ : List[Any] = tokenizer.eos_token_id lowerCamelCase__ : Optional[Any] = SpeechEncoderDecoderConfig.from_dict(UpperCamelCase ) hf_wavavec.save_pretrained(UpperCamelCase ) feature_extractor.save_pretrained(UpperCamelCase ) if __name__ == "__main__": _A : Optional[Any] =argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') 
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_yaml_path''', default=None, type=str, help='''Path to yaml file of fine-tuned model''') parser.add_argument( '''--encoder_config_path''', default='''facebook/wav2vec2-xls-r-1b''', type=str, help='''Path to hf encoder wav2vec2 checkpoint config''', ) parser.add_argument( '''--decoder_config_path''', default='''facebook/mbart-large-50-one-to-many-mmt''', type=str, help='''Path to hf decoder checkpoint config''', ) parser.add_argument('''--add_adapter''', default=True, type=bool, help='''whethere to add model adapter layers''') parser.add_argument('''--adapter_stride''', default=2, type=int, help='''stride of adapter layers''') parser.add_argument('''--adapter_kernel_size''', default=3, type=int, help='''kernel size of adapter layers''') parser.add_argument('''--encoder_output_dim''', default=1_024, type=int, help='''encoder output dim''') parser.add_argument('''--start_token_id''', default=250_004, type=int, help='''`decoder_start_token_id` of model config''') _A : List[Any] =parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, args.config_yaml_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, add_adapter=args.add_adapter, adapter_kernel_size=args.adapter_kernel_size, adapter_stride=args.adapter_stride, decoder_start_token_id=args.start_token_id, encoder_output_dim=args.encoder_output_dim, )
631
'''simple docstring''' _A : List[str] ='''0.21.0''' from .accelerator import Accelerator from .big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from .data_loader import skip_first_batches from .launchers import debug_launcher, notebook_launcher from .state import PartialState from .utils import ( DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states, ) if is_rich_available(): from .utils import rich
631
1
'''simple docstring'''
# Project Euler problem 21: sum all amicable numbers below a limit.
#
# Fix: the two functions had been renamed to the same identifier
# (``SCREAMING_SNAKE_CASE_``), the second shadowing the first, while the call
# sites still referenced ``sum_of_divisors`` and ``solution`` — the script
# raised NameError. Restored the names the call sites expect, and switched
# from float ``sqrt`` (with an int-vs-float equality test) to ``math.isqrt``
# for exact integer arithmetic.
from math import isqrt


def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of ``n`` (excluding ``n`` itself)."""
    total = 0
    root = isqrt(n)  # exact integer square root; no float rounding
    for i in range(1, root + 1):
        if n % i == 0:
            if i * i == n:
                # perfect square: count the root divisor only once
                total += i
            else:
                total += i + n // i
    # n itself was added via the (1, n) divisor pair; remove it
    return total - n


def solution(limit: int = 10000) -> int:
    """Return the sum of all amicable numbers strictly below ``limit``.

    ``a`` and ``b`` are amicable when ``sum_of_divisors(a) == b`` and
    ``sum_of_divisors(b) == a`` with ``a != b`` (perfect numbers excluded).
    """
    return sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
631
'''simple docstring'''
# Configuration for the TrOCR decoder — a decoder-only transformer paired
# with a vision encoder for optical text recognition.
from ...configuration_utils import PretrainedConfig
from ...utils import logging


_A : Any =logging.get_logger(__name__)

# Map of pretrained checkpoint names to their hosted config files.
_A : Dict ={
    '''microsoft/trocr-base-handwritten''': (
        '''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class _lowercase ( _lowercase ):
    # NOTE(review): the three ``a = ...`` assignments below shadow one another
    # as written — they look machine-renamed from three distinct class
    # attributes (model type string, keys to ignore at inference, and the
    # attribute-name map); confirm against the original module.
    a = """trocr"""
    a = ["""past_key_values"""]
    a = {
        """num_attention_heads""": """decoder_attention_heads""",
        """hidden_size""": """d_model""",
        """num_hidden_layers""": """decoder_layers""",
    }

    def __init__( self: Optional[Any] , UpperCamelCase__: int=50_265 , UpperCamelCase__: int=1_024 , UpperCamelCase__: Optional[Any]=12 , UpperCamelCase__: Dict=16 , UpperCamelCase__: int=4_096 , UpperCamelCase__: Tuple="gelu" , UpperCamelCase__: int=512 , UpperCamelCase__: Dict=0.1 , UpperCamelCase__: Tuple=0.0 , UpperCamelCase__: str=0.0 , UpperCamelCase__: Any=2 , UpperCamelCase__: Dict=0.02 , UpperCamelCase__: Optional[Any]=0.0 , UpperCamelCase__: str=True , UpperCamelCase__: Tuple=False , UpperCamelCase__: Union[str, Any]=True , UpperCamelCase__: Tuple=True , UpperCamelCase__: Dict=1 , UpperCamelCase__: List[str]=0 , UpperCamelCase__: Union[str, Any]=2 , **UpperCamelCase__: str , ):
        # Store the decoder hyperparameters on the config instance, then let
        # the base class handle the special token ids and remaining kwargs.
        # NOTE(review): parameter names were obfuscated to ``UpperCamelCase__``,
        # yet the assignments read named variables (vocab_size, d_model, ...)
        # — unresolved as written; the right-hand names document the intended
        # parameter order.
        lowerCamelCase__ : Any = vocab_size
        lowerCamelCase__ : Tuple = d_model
        lowerCamelCase__ : Any = decoder_layers
        lowerCamelCase__ : Dict = decoder_attention_heads
        lowerCamelCase__ : str = decoder_ffn_dim
        lowerCamelCase__ : Tuple = activation_function
        lowerCamelCase__ : Dict = max_position_embeddings
        lowerCamelCase__ : int = dropout
        lowerCamelCase__ : int = attention_dropout
        lowerCamelCase__ : List[Any] = activation_dropout
        lowerCamelCase__ : Union[str, Any] = init_std
        lowerCamelCase__ : Optional[int] = decoder_layerdrop
        lowerCamelCase__ : Dict = use_cache
        lowerCamelCase__ : Any = scale_embedding
        lowerCamelCase__ : Optional[int] = use_learned_position_embeddings
        lowerCamelCase__ : List[str] = layernorm_embedding
        super().__init__(
            pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , decoder_start_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
631
1
'''simple docstring''' import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class _lowercase ( _lowercase ): def __init__( self: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[int]=13 , UpperCamelCase__: Dict=7 , UpperCamelCase__: List[str]=True , UpperCamelCase__: List[Any]=True , UpperCamelCase__: Dict=True , UpperCamelCase__: List[Any]=True , UpperCamelCase__: int=99 , UpperCamelCase__: Union[str, Any]=32 , UpperCamelCase__: List[str]=5 , UpperCamelCase__: Tuple=4 , UpperCamelCase__: Optional[int]=37 , UpperCamelCase__: Optional[int]="gelu" , UpperCamelCase__: Optional[Any]=0.1 , UpperCamelCase__: Dict=0.1 , UpperCamelCase__: Optional[Any]=512 , UpperCamelCase__: Optional[Any]=16 , UpperCamelCase__: Optional[int]=2 , UpperCamelCase__: Union[str, Any]=0.02 , UpperCamelCase__: Any=False , UpperCamelCase__: Dict=True , UpperCamelCase__: List[Any]="None" , UpperCamelCase__: Dict=3 , UpperCamelCase__: int=4 , UpperCamelCase__: Optional[int]=None , ): lowerCamelCase__ : List[str] = parent lowerCamelCase__ : Tuple = batch_size lowerCamelCase__ : Tuple = seq_length lowerCamelCase__ : Union[str, Any] = is_training lowerCamelCase__ : Tuple = use_input_mask lowerCamelCase__ : Dict = use_token_type_ids lowerCamelCase__ : List[str] = use_labels lowerCamelCase__ : Dict = vocab_size lowerCamelCase__ : Optional[int] = hidden_size lowerCamelCase__ : str = 
num_hidden_layers lowerCamelCase__ : str = num_attention_heads lowerCamelCase__ : Optional[int] = intermediate_size lowerCamelCase__ : Tuple = hidden_act lowerCamelCase__ : Union[str, Any] = hidden_dropout_prob lowerCamelCase__ : int = attention_probs_dropout_prob lowerCamelCase__ : str = max_position_embeddings lowerCamelCase__ : int = type_vocab_size lowerCamelCase__ : Optional[int] = type_sequence_label_size lowerCamelCase__ : List[Any] = initializer_range lowerCamelCase__ : List[Any] = num_labels lowerCamelCase__ : str = num_choices lowerCamelCase__ : Optional[Any] = relative_attention lowerCamelCase__ : int = position_biased_input lowerCamelCase__ : int = pos_att_type lowerCamelCase__ : int = scope def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ : Optional[Any] = None if self.use_input_mask: lowerCamelCase__ : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) lowerCamelCase__ : int = None if self.use_token_type_ids: lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase__ : Optional[Any] = None lowerCamelCase__ : List[Any] = None lowerCamelCase__ : int = None if self.use_labels: lowerCamelCase__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase__ : str = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ : List[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase_ ( self: Optional[int] ): return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , 
hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : List[str] = self.get_config() lowerCamelCase__ : str = 300 return config def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: List[Any] ): self.parent.assertListEqual(list(result.loss.size() ) , [] ) def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: str , UpperCamelCase__: List[Any] , UpperCamelCase__: str , UpperCamelCase__: int ): lowerCamelCase__ : str = DebertaModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : Dict = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ )[0] lowerCamelCase__ : List[str] = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ )[0] lowerCamelCase__ : int = model(UpperCamelCase__ )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def lowerCamelCase_ ( self: str , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Dict , UpperCamelCase__: Any , UpperCamelCase__: Tuple , UpperCamelCase__: Dict , UpperCamelCase__: Dict ): lowerCamelCase__ : int = DebertaForMaskedLM(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : Dict = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase_ ( self: List[str] , 
UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: str , UpperCamelCase__: Optional[Any] , UpperCamelCase__: str , UpperCamelCase__: Dict , UpperCamelCase__: List[Any] ): lowerCamelCase__ : str = self.num_labels lowerCamelCase__ : str = DebertaForSequenceClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : Any = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(UpperCamelCase__ ) def lowerCamelCase_ ( self: Dict , UpperCamelCase__: Optional[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Any , UpperCamelCase__: Optional[int] ): lowerCamelCase__ : int = self.num_labels lowerCamelCase__ : Tuple = DebertaForTokenClassification(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : Dict = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase_ ( self: Dict , UpperCamelCase__: int , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Dict ): lowerCamelCase__ : List[str] = DebertaForQuestionAnswering(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : int = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) 
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase_ ( self: Dict ): lowerCamelCase__ : Union[str, Any] = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) : Optional[int] = config_and_inputs lowerCamelCase__ : Dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _lowercase ( _lowercase , _lowercase , unittest.TestCase ): a = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) a = ( { """feature-extraction""": DebertaModel, """fill-mask""": DebertaForMaskedLM, """question-answering""": DebertaForQuestionAnswering, """text-classification""": DebertaForSequenceClassification, """token-classification""": DebertaForTokenClassification, """zero-shot""": DebertaForSequenceClassification, } if is_torch_available() else {} ) a = True a = False a = False a = False a = False def lowerCamelCase_ ( self: List[Any] ): lowerCamelCase__ : Tuple = DebertaModelTester(self ) lowerCamelCase__ : List[str] = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 ) def lowerCamelCase_ ( self: Optional[int] ): self.config_tester.run_common_tests() def lowerCamelCase_ ( self: int ): lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*UpperCamelCase__ ) def lowerCamelCase_ ( self: int ): lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*UpperCamelCase__ ) def lowerCamelCase_ ( self: str ): lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_deberta_for_masked_lm(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Dict ): lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Optional[Any] ): lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*UpperCamelCase__ ) @slow def lowerCamelCase_ ( self: Any ): for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : int = DebertaModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) @require_torch @require_sentencepiece @require_tokenizers class _lowercase ( unittest.TestCase ): @unittest.skip(reason="""Model not available yet""" ) def lowerCamelCase_ ( self: Union[str, Any] ): pass @slow def lowerCamelCase_ ( self: Optional[Any] ): lowerCamelCase__ : str = DebertaModel.from_pretrained("""microsoft/deberta-base""" ) lowerCamelCase__ : List[Any] = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] ) lowerCamelCase__ : str = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowerCamelCase__ : List[str] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )[0] # compare the actual values for a slice. lowerCamelCase__ : Any = torch.tensor( [[[-0.5_986, -0.8_055, -0.8_462], [1.4_484, -0.9_348, -0.8_059], [0.3_123, 0.0_032, -1.4_131]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCamelCase__ , atol=1e-4 ) , F'''{output[:, 1:4, 1:4]}''' )
631
'''simple docstring''' # A Bipartite Graph is a graph whose vertices can be divided into two independent sets, # U and V such that every edge (u, v) either connects a vertex from U to V or a vertex # from V to U. In other words, for every edge (u, v), either u belongs to U and v to V, # or u belongs to V and v to U. We can also say that there is no edge that connects # vertices of same set. def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Optional[Any]: lowerCamelCase__ : str = [False] * len(UpperCamelCase ) lowerCamelCase__ : str = [-1] * len(UpperCamelCase ) def dfs(UpperCamelCase , UpperCamelCase ): lowerCamelCase__ : Optional[int] = True lowerCamelCase__ : Union[str, Any] = c for u in graph[v]: if not visited[u]: dfs(UpperCamelCase , 1 - c ) for i in range(len(UpperCamelCase ) ): if not visited[i]: dfs(UpperCamelCase , 0 ) for i in range(len(UpperCamelCase ) ): for j in graph[i]: if color[i] == color[j]: return False return True # Adjacency list of graph _A : int ={0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []} print(check_bipartite_dfs(graph))
631
1
'''simple docstring''' import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _lowercase : def __init__( self: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: List[str]=13 , UpperCamelCase__: List[str]=30 , UpperCamelCase__: List[Any]=2 , UpperCamelCase__: Tuple=3 , UpperCamelCase__: str=True , UpperCamelCase__: int=True , UpperCamelCase__: List[Any]=32 , UpperCamelCase__: Any=5 , UpperCamelCase__: Optional[Any]=4 , UpperCamelCase__: List[Any]=37 , UpperCamelCase__: List[Any]="gelu" , UpperCamelCase__: Optional[int]=0.1 , UpperCamelCase__: Tuple=0.1 , UpperCamelCase__: str=10 , UpperCamelCase__: Optional[int]=0.02 , UpperCamelCase__: Optional[Any]=None , UpperCamelCase__: List[Any]=2 , ): lowerCamelCase__ : Union[str, Any] = parent lowerCamelCase__ : str = batch_size lowerCamelCase__ : str = image_size lowerCamelCase__ : Union[str, Any] = patch_size lowerCamelCase__ : int = num_channels lowerCamelCase__ : List[Any] = is_training lowerCamelCase__ : str = use_labels lowerCamelCase__ : Any = hidden_size lowerCamelCase__ : Dict = num_hidden_layers lowerCamelCase__ : int = num_attention_heads lowerCamelCase__ : Any = intermediate_size lowerCamelCase__ : List[Any] = hidden_act lowerCamelCase__ : Union[str, Any] = 
hidden_dropout_prob lowerCamelCase__ : str = attention_probs_dropout_prob lowerCamelCase__ : Dict = type_sequence_label_size lowerCamelCase__ : List[Any] = initializer_range lowerCamelCase__ : List[str] = scope lowerCamelCase__ : Union[str, Any] = encoder_stride # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) lowerCamelCase__ : Optional[int] = (image_size // patch_size) ** 2 lowerCamelCase__ : int = num_patches + 1 def lowerCamelCase_ ( self: str ): lowerCamelCase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ : List[str] = None if self.use_labels: lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : str = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self: Union[str, Any] ): return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def lowerCamelCase_ ( self: int , UpperCamelCase__: Optional[int] , UpperCamelCase__: Any , UpperCamelCase__: Union[str, Any] ): lowerCamelCase__ : str = ViTModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : Union[str, Any] = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase_ ( self: int , UpperCamelCase__: Dict , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: int ): lowerCamelCase__ : int = 
ViTForMaskedImageModeling(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : Union[str, Any] = model(UpperCamelCase__ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCamelCase__ : List[Any] = 1 lowerCamelCase__ : str = ViTForMaskedImageModeling(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase__ : Union[str, Any] = model(UpperCamelCase__ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[Any] ): lowerCamelCase__ : Union[str, Any] = self.type_sequence_label_size lowerCamelCase__ : List[str] = ViTForImageClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : str = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCamelCase__ : List[str] = 1 lowerCamelCase__ : List[Any] = ViTForImageClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase__ : List[str] = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCamelCase_ ( self: int ): lowerCamelCase__ : Tuple = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) : Optional[Any] = config_and_inputs lowerCamelCase__ : str = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _lowercase ( _lowercase , _lowercase , 
unittest.TestCase ): a = ( ( ViTModel, ViTForImageClassification, ViTForMaskedImageModeling, ) if is_torch_available() else () ) a = ( {"""feature-extraction""": ViTModel, """image-classification""": ViTForImageClassification} if is_torch_available() else {} ) a = True a = False a = False a = False def lowerCamelCase_ ( self: Optional[int] ): lowerCamelCase__ : List[Any] = ViTModelTester(self ) lowerCamelCase__ : List[Any] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 ) def lowerCamelCase_ ( self: List[Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="""ViT does not use inputs_embeds""" ) def lowerCamelCase_ ( self: List[Any] ): pass def lowerCamelCase_ ( self: Any ): lowerCamelCase__ , lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Optional[Any] = model_class(UpperCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCamelCase__ : str = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) ) def lowerCamelCase_ ( self: int ): lowerCamelCase__ , lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : int = model_class(UpperCamelCase__ ) lowerCamelCase__ : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ : int = [*signature.parameters.keys()] lowerCamelCase__ : List[str] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def lowerCamelCase_ ( self: Optional[int] ): lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Dict ): lowerCamelCase__ : List[str] = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase__ ) def lowerCamelCase_ ( self: List[str] ): lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ ) @slow def lowerCamelCase_ ( self: Any ): for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : Optional[int] = ViTModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE_ () -> Union[str, Any]: lowerCamelCase__ : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class _lowercase ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self: Union[str, Any] ): return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None @slow def lowerCamelCase_ ( self: List[Any] ): lowerCamelCase__ : int = ViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" ).to(UpperCamelCase__ ) lowerCamelCase__ : Optional[int] = self.default_image_processor lowerCamelCase__ : Any = prepare_img() lowerCamelCase__ : str = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): lowerCamelCase__ : int = model(**UpperCamelCase__ ) # verify the logits lowerCamelCase__ : Union[str, Any] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) lowerCamelCase__ : List[Any] = torch.tensor([-0.2_744, 0.8_215, -0.0_836] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) ) @slow def lowerCamelCase_ ( self: Union[str, Any] ): # ViT models have an `interpolate_pos_encoding` argument in their forward method, # allowing to interpolate the pre-trained position embeddings in order to use # the model on higher 
resolutions. The DINO model by Facebook AI leverages this # to visualize self-attention on higher resolution images. lowerCamelCase__ : Optional[Any] = ViTModel.from_pretrained("""facebook/dino-vits8""" ).to(UpperCamelCase__ ) lowerCamelCase__ : int = ViTImageProcessor.from_pretrained("""facebook/dino-vits8""" , size=480 ) lowerCamelCase__ : List[str] = prepare_img() lowerCamelCase__ : List[Any] = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ) lowerCamelCase__ : Optional[int] = inputs.pixel_values.to(UpperCamelCase__ ) # forward pass with torch.no_grad(): lowerCamelCase__ : int = model(UpperCamelCase__ , interpolate_pos_encoding=UpperCamelCase__ ) # verify the logits lowerCamelCase__ : Dict = torch.Size((1, 3_601, 384) ) self.assertEqual(outputs.last_hidden_state.shape , UpperCamelCase__ ) lowerCamelCase__ : int = torch.tensor( [[4.2_340, 4.3_906, -6.6_692], [4.5_463, 1.8_928, -6.7_257], [4.4_429, 0.8_496, -5.8_585]] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def lowerCamelCase_ ( self: Dict ): lowerCamelCase__ : Dict = ViTModel.from_pretrained("""facebook/dino-vits8""" , torch_dtype=torch.floataa , device_map="""auto""" ) lowerCamelCase__ : Tuple = self.default_image_processor lowerCamelCase__ : str = prepare_img() lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ) lowerCamelCase__ : Dict = inputs.pixel_values.to(UpperCamelCase__ ) # forward pass to make sure inference works in fp16 with torch.no_grad(): lowerCamelCase__ : str = model(UpperCamelCase__ )
631
'''simple docstring''' import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class _lowercase ( _lowercase ): def __init__( self: Optional[Any] , UpperCamelCase__: Any , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[int] ): lowerCamelCase__ : Optional[int] = dataset lowerCamelCase__ : Optional[int] = process lowerCamelCase__ : List[str] = params def __len__( self: List[str] ): return len(self.dataset ) def __getitem__( self: Any , UpperCamelCase__: int ): lowerCamelCase__ : Dict = self.dataset[i] lowerCamelCase__ : Union[str, Any] = self.process(UpperCamelCase__ , **self.params ) return processed class _lowercase ( _lowercase ): def __init__( self: Optional[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: str , UpperCamelCase__: Tuple , UpperCamelCase__: Any=None ): lowerCamelCase__ : int = loader lowerCamelCase__ : str = infer lowerCamelCase__ : Optional[int] = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether lowerCamelCase__ : Optional[int] = None lowerCamelCase__ : int = loader_batch_size # Internal bookkeeping lowerCamelCase__ : Optional[Any] = None lowerCamelCase__ : Optional[Any] = None def __len__( self: Dict ): return len(self.loader ) def __iter__( self: Optional[int] ): lowerCamelCase__ : List[Any] = iter(self.loader ) return self def lowerCamelCase_ ( self: Any ): if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice lowerCamelCase__ : str = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) lowerCamelCase__ : int = {} for k, element in self._loader_batch_data.items(): if isinstance(UpperCamelCase__ , UpperCamelCase__ ): # Convert ModelOutput to tuple first lowerCamelCase__ : str = element.to_tuple() if isinstance(element[0] , torch.Tensor ): lowerCamelCase__ : Tuple = 
tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): lowerCamelCase__ : str = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(UpperCamelCase__ , UpperCamelCase__ ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): lowerCamelCase__ : List[str] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): lowerCamelCase__ : int = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around lowerCamelCase__ : List[str] = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers lowerCamelCase__ : Optional[Any] = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers lowerCamelCase__ : int = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. 
lowerCamelCase__ : str = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 lowerCamelCase__ : Optional[int] = self._loader_batch_data.__class__(UpperCamelCase__ ) self._loader_batch_index += 1 return result def lowerCamelCase_ ( self: List[Any] ): if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch lowerCamelCase__ : Optional[Any] = next(self.iterator ) lowerCamelCase__ : List[str] = self.infer(UpperCamelCase__ , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(UpperCamelCase__ , torch.Tensor ): lowerCamelCase__ : Optional[Any] = processed else: lowerCamelCase__ : Union[str, Any] = list(processed.keys() )[0] lowerCamelCase__ : Any = processed[key] if isinstance(UpperCamelCase__ , UpperCamelCase__ ): lowerCamelCase__ : Any = len(UpperCamelCase__ ) else: lowerCamelCase__ : List[str] = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. 
# ---------------------------------------------------------------------------
# NOTE(review): this chunk begins with the tail of a pipeline-iterator method
# whose `def` line lies above the visible region, followed by three iterator /
# dataset adapter classes.  Throughout, assignments of the form
# `lowerCamelCase__ : <type> = ...` bind a throwaway name while later code
# reads names (`accumulator`, `is_last`, `processed`, `item`, `key`, ...) that
# are never visibly bound — this looks like mechanical renaming damage;
# confirm against the original source before relying on runtime behaviour.
# Only comments/docstrings are added below; code tokens are untouched.
# ---------------------------------------------------------------------------
                    lowerCamelCase__ : Union[str, Any] = observed_batch_size
                # Setting internal index to unwrap the batch
                lowerCamelCase__ : List[Any] = processed
                lowerCamelCase__ : List[Any] = 0
                return self.loader_batch_item()
            else:
                # We're not unrolling batches
                return processed


class _lowercase ( _lowercase ):
    """Chunk iterator: flattens one sub-iterator per source item into a single
    stream (lists-of-lists flattened lazily with generators)."""

    def __init__( self: List[str] , UpperCamelCase__: Any , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[Any]=None ):
        # Delegates the first three ctor args to the base iterator; the fourth
        # (presumably a loader batch size) is accepted but not forwarded.
        super().__init__(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )

    def __iter__( self: Union[str, Any] ):
        # Reset the source iterator and clear the per-item sub-iterator.
        lowerCamelCase__ : str = iter(self.loader )
        lowerCamelCase__ : int = None
        return self

    def lowerCamelCase_ ( self: str ):
        """Advance: yield from the current sub-iterator, building a new one
        from the next source item whenever the current one is exhausted."""
        if self.subiterator is None:
            lowerCamelCase__ : Optional[Any] = self.infer(next(self.iterator ) , **self.params )
        try:
            # Try to return next item
            lowerCamelCase__ : Tuple = next(self.subiterator )
        except StopIteration:
            # When a preprocess iterator ends, we can start lookig at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            lowerCamelCase__ : Any = self.infer(next(self.iterator ) , **self.params )
            lowerCamelCase__ : Union[str, Any] = next(self.subiterator )
        return processed


class _lowercase ( _lowercase ):
    """Pack iterator: regroups the flattened stream back into per-input lists,
    using each item's `is_last` flag as the group terminator."""

    def __iter__( self: List[Any] ):
        lowerCamelCase__ : int = iter(self.loader )
        return self

    def lowerCamelCase_ ( self: Tuple ):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # its a `is_last` and then just passes it on to the caller.
        lowerCamelCase__ : List[str] = False
        lowerCamelCase__ : Union[str, Any] = []
        # First drain any partially-unrolled batch left over from a prior call.
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                lowerCamelCase__ : Any = self.loader_batch_item()
                lowerCamelCase__ : Tuple = item.pop("""is_last""" )
                accumulator.append(UpperCamelCase__ )
                if is_last:
                    return accumulator
        while not is_last:
            lowerCamelCase__ : str = self.infer(next(self.iterator ) , **self.params )
            if self.loader_batch_size is not None:
                # Peek at the first tensor/list in the output to learn the
                # observed batch size before unrolling.
                if isinstance(UpperCamelCase__ , torch.Tensor ):
                    lowerCamelCase__ : Dict = processed
                else:
                    lowerCamelCase__ : Dict = list(processed.keys() )[0]
                    lowerCamelCase__ : Dict = processed[key]
                if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                    lowerCamelCase__ : List[Any] = len(UpperCamelCase__ )
                else:
                    lowerCamelCase__ : Dict = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    lowerCamelCase__ : str = observed_batch_size
                lowerCamelCase__ : str = processed
                lowerCamelCase__ : Optional[int] = 0
                while self._loader_batch_index < self.loader_batch_size:
                    lowerCamelCase__ : List[Any] = self.loader_batch_item()
                    lowerCamelCase__ : str = item.pop("""is_last""" )
                    accumulator.append(UpperCamelCase__ )
                    if is_last:
                        return accumulator
            else:
                # Not batched: the processed object itself is the item.
                lowerCamelCase__ : Optional[Any] = processed
                lowerCamelCase__ : Optional[int] = item.pop("""is_last""" )
                accumulator.append(UpperCamelCase__ )
        return accumulator


class _lowercase ( _lowercase ):
    """Dataset view selecting a single column (`key`) from each row."""

    def __init__( self: Optional[int] , UpperCamelCase__: Dataset , UpperCamelCase__: str ):
        lowerCamelCase__ : Union[str, Any] = dataset
        lowerCamelCase__ : str = key

    def __len__( self: Optional[Any] ):
        return len(self.dataset )

    def __getitem__( self: List[str] , UpperCamelCase__: Any ):
        # NOTE(review): reads index `i`, but the parameter is named
        # `UpperCamelCase__` — another symptom of the renaming damage.
        return self.dataset[i][self.key]


class _lowercase ( _lowercase ):
    """Dataset view pairing two columns into {"text", "text_pair"} dicts.

    NOTE(review): both attributes are written to/read from `keya` — the two
    originally distinct column names appear to have collapsed into one.
    """

    def __init__( self: Optional[int] , UpperCamelCase__: Dataset , UpperCamelCase__: str , UpperCamelCase__: str ):
        lowerCamelCase__ : str = dataset
        lowerCamelCase__ : Dict = keya
        lowerCamelCase__ : List[str] = keya

    def __len__( self: str ):
        return len(self.dataset )

    def __getitem__( self: List[str] , UpperCamelCase__: Union[str, Any] ):
        return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
631
1
"""Highest Response Ratio Next (HRRN) scheduling.

Non-preemptive: at each step the arrived, unfinished process with the highest
response ratio (burst + waiting) / burst is run to completion.
"""
from statistics import mean

import numpy as np


def calculate_turn_around_time(process_name, arrival_time, burst_time, no_of_process):
    """Return per-process turn-around times under HRRN.

    Args:
        process_name: list of process labels.
        arrival_time: list of arrival times (mutated: sorted in place).
        burst_time: list of burst times, parallel to ``process_name``.
        no_of_process: number of processes.

    Returns:
        Turn-around times, ordered by arrival time.
    """
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort all three parallel lists by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # Find the first unfinished process; if nothing has arrived yet,
        # fast-forward the clock to its arrival.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(process_name, turn_around_time, burst_time, no_of_process):
    """Return per-process waiting times (turn-around minus burst)."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )
    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
631
'''simple docstring''' # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import platform import sys _A : Dict ='''3''' print('''Python version:''', sys.version) print('''OS platform:''', platform.platform()) print('''OS architecture:''', platform.machine()) try: import torch print('''Torch version:''', torch.__version__) print('''Cuda available:''', torch.cuda.is_available()) print('''Cuda version:''', torch.version.cuda) print('''CuDNN version:''', torch.backends.cudnn.version()) print('''Number of GPUs available:''', torch.cuda.device_count()) except ImportError: print('''Torch version:''', None) try: import transformers print('''transformers version:''', transformers.__version__) except ImportError: print('''transformers version:''', None)
631
1
"""Tests for the zero-shot image-classification pipeline."""
import unittest

from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY

if is_vision_available():
    from PIL import Image
else:
    class Image:
        """Stub so the module imports without PIL; only `open` is referenced,
        and `@require_vision` skips the tests before it is ever called."""

        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_vision
class _lowercase(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        # One triple of (score, label) per image; label order is not guaranteed.
        expected = [
            [
                {"score": 0.333, "label": ANY(str)},
                {"score": 0.333, "label": ANY(str)},
                {"score": 0.333, "label": ANY(str)},
            ]
        ] * 5
        self.assertEqual(nested_simplify(output), expected)

    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        expected = [
            [
                {"score": 0.333, "label": ANY(str)},
                {"score": 0.333, "label": ANY(str)},
                {"score": 0.333, "label": ANY(str)},
            ]
        ] * 5
        self.assertEqual(nested_simplify(output), expected)

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
631
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) _A : Any ={ '''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''], '''processing_trocr''': ['''TrOCRProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : str =[ '''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TrOCRForCausalLM''', '''TrOCRPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys _A : Dict =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
631
1
"""Tests for CMStochasticIterativeScheduler (consistency models)."""
import torch

from diffusers import CMStochasticIterativeScheduler

from .test_schedulers import SchedulerCommonTest


class _lowercase(SchedulerCommonTest):
    # Attributes read by SchedulerCommonTest helpers.
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Default scheduler config, overridable via keyword arguments."""
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**kwargs)
        return config

    def test_step_shape(self):
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)
        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        # Stepping must preserve the sample shape regardless of timestep.
        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7_614) < 1e-2
        assert abs(result_mean.item() - 0.2_510) < 1e-3

    def test_full_loop_no_noise_multistep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6_357) < 1e-2
        assert abs(result_mean.item() - 0.4_527) < 1e-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        # 15 after 12 breaks the required descending order.
        timesteps = [39, 30, 12, 15, 0]
        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]
        # NOTE(review): msg kept verbatim; it reads like an f-string whose `f`
        # prefix was lost upstream — confirm before "fixing".
        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
631
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _A : Union[str, Any] =logging.get_logger(__name__) _A : List[str] ={ '''MIT/ast-finetuned-audioset-10-10-0.4593''': ( '''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json''' ), } class _lowercase ( _lowercase ): a = """audio-spectrogram-transformer""" def __init__( self: str , UpperCamelCase__: Any=768 , UpperCamelCase__: Union[str, Any]=12 , UpperCamelCase__: List[Any]=12 , UpperCamelCase__: int=3_072 , UpperCamelCase__: Optional[Any]="gelu" , UpperCamelCase__: Optional[int]=0.0 , UpperCamelCase__: Tuple=0.0 , UpperCamelCase__: Union[str, Any]=0.02 , UpperCamelCase__: Dict=1e-12 , UpperCamelCase__: List[str]=16 , UpperCamelCase__: List[str]=True , UpperCamelCase__: Any=10 , UpperCamelCase__: List[str]=10 , UpperCamelCase__: Any=1_024 , UpperCamelCase__: Optional[Any]=128 , **UpperCamelCase__: Union[str, Any] , ): super().__init__(**UpperCamelCase__ ) lowerCamelCase__ : List[Any] = hidden_size lowerCamelCase__ : int = num_hidden_layers lowerCamelCase__ : List[str] = num_attention_heads lowerCamelCase__ : Optional[int] = intermediate_size lowerCamelCase__ : List[Any] = hidden_act lowerCamelCase__ : List[Any] = hidden_dropout_prob lowerCamelCase__ : str = attention_probs_dropout_prob lowerCamelCase__ : Dict = initializer_range lowerCamelCase__ : List[str] = layer_norm_eps lowerCamelCase__ : List[Any] = patch_size lowerCamelCase__ : List[str] = qkv_bias lowerCamelCase__ : Dict = frequency_stride lowerCamelCase__ : List[Any] = time_stride lowerCamelCase__ : str = max_length lowerCamelCase__ : Dict = num_mel_bins
631
1
'''simple docstring''' import math from datetime import datetime, timedelta def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> datetime: lowerCamelCase__ : Tuple = year % 19 lowerCamelCase__ : int = year % 4 lowerCamelCase__ : Tuple = year % 7 lowerCamelCase__ : Optional[int] = math.floor(year / 100 ) lowerCamelCase__ : Optional[int] = math.floor((13 + 8 * leap_day_inhibits) / 25 ) lowerCamelCase__ : Optional[Any] = leap_day_inhibits / 4 lowerCamelCase__ : str = ( 15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number ) % 30 lowerCamelCase__ : List[Any] = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7 # days to be added to March 21 lowerCamelCase__ : List[Any] = (19 * metonic_cycle + secular_moon_shift) % 30 # PHM -> Paschal Full Moon lowerCamelCase__ : int = ( 2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point ) % 7 if days_to_add == 29 and days_from_phm_to_sunday == 6: return datetime(UpperCamelCase , 4 , 19 ) elif days_to_add == 28 and days_from_phm_to_sunday == 6: return datetime(UpperCamelCase , 4 , 18 ) else: return datetime(UpperCamelCase , 3 , 22 ) + timedelta( days=int(days_to_add + days_from_phm_to_sunday ) ) if __name__ == "__main__": for year in (1_994, 2_000, 2_010, 2_021, 2_023): _A : Optional[int] ='''will be''' if year > datetime.now().year else '''was''' print(F'Easter in {year} {tense} {gauss_easter(year)}')
631
"""Release helper: bump version strings across the repo and clean the README."""
import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
# pattern name -> (compiled regex, replacement template with VERSION placeholder)
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
# pattern name -> file it applies to
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Rewrite the version in one file using the named replacement pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update `check_min_version` calls in every maintained example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version everywhere it is hard-coded (skip examples on a patch)."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Redirect model-doc links in the README from `main` to the stable docs."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version out of the package `__init__`."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Interactively bump to the release version before tagging."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Interactively bump to the next dev version after a release."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
631
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) _A : Optional[Any] ={ '''configuration_owlvit''': [ '''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OwlViTConfig''', '''OwlViTOnnxConfig''', '''OwlViTTextConfig''', '''OwlViTVisionConfig''', ], '''processing_owlvit''': ['''OwlViTProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : Any =['''OwlViTFeatureExtractor'''] _A : str =['''OwlViTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : Optional[Any] =[ '''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''OwlViTModel''', '''OwlViTPreTrainedModel''', '''OwlViTTextModel''', '''OwlViTVisionModel''', '''OwlViTForObjectDetection''', ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys _A : List[str] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
631
"""Check DDPM and DDIM schedulers add identical noise during training."""
import unittest

import torch

from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow


# Disable TF32 so CPU/GPU matmuls are bit-comparable across runs.
# NOTE(review): the original line had decayed to a dead `_A = False` binding;
# restored to the conventional target of that flag — confirm upstream.
torch.backends.cuda.matmul.allow_tf32 = False


class _lowercase(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        """Build a freshly-seeded tiny UNet and an SGD optimizer for it."""
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0_001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1_000,
            beta_start=0.0_001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1_000,
            beta_start=0.0_001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1_000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # Both schedulers must have produced the same noisy inputs and,
        # since the models were identically seeded, the same predictions.
        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
631
1
'''simple docstring''' def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> list: if len(UpperCamelCase ) <= 1: return [tuple(UpperCamelCase )] lowerCamelCase__ : List[Any] = [] def generate(UpperCamelCase , UpperCamelCase ): lowerCamelCase__ : Tuple = [0] * n res.append(tuple(UpperCamelCase ) ) lowerCamelCase__ : str = 0 while i < n: if c[i] < i: if i % 2 == 0: lowerCamelCase__ , lowerCamelCase__ : Any = arr[i], arr[0] else: lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = arr[i], arr[c[i]] res.append(tuple(UpperCamelCase ) ) c[i] += 1 lowerCamelCase__ : Any = 0 else: lowerCamelCase__ : Union[str, Any] = 0 i += 1 generate(len(UpperCamelCase ) , UpperCamelCase ) return res if __name__ == "__main__": _A : List[str] =input('''Enter numbers separated by a comma:\n''').strip() _A : Optional[int] =[int(item) for item in user_input.split(''',''')] print(heaps(arr))
631
"""Highest Response Ratio Next (HRRN) CPU scheduling.

Reconstructed from a machine-mangled original whose duplicate placeholder
parameter names (`UpperCamelCase` four times per def) were a SyntaxError.

HRRN is non-preemptive: whenever the CPU frees up, the arrived process with
the largest response ratio (waiting_time + burst_time) / burst_time runs next.
"""
from statistics import mean

import numpy as np


def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """Return the turnaround time of each process under HRRN scheduling.

    Results are indexed by arrival order. Note: *arrival_time* is sorted in
    place; *process_name* and *burst_time* are re-bound locally to match.
    """
    current_time = 0
    finished_process_count = 0  # number of processes completed so far
    # Per-process completion flag: 0 = pending, 1 = finished.
    finished_process = [0] * no_of_process
    turn_around_time = [0] * no_of_process

    # Sort every per-process list by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # If nothing has arrived yet, jump the clock to the next arrival.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        loc = 0             # index of the process chosen to run
        response_ratio = 0  # best response ratio seen this pass
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i

        # Run the chosen process to completion (non-preemptive).
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        finished_process[loc] = 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    """Return waiting time = turnaround time - burst time for each process."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )
    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
631
1
"""OpenAI GPT tokenizer tests (reconstructed from machine-mangled source).

The original used the undefined placeholder `_lowercase` as both mixin and
base class, obfuscated every method name, and passed an undefined placeholder
as the exception class to ``assertRaises``; names below follow the upstream
transformers test module.
"""
import json
import os
import unittest

from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        """Write a tiny BPE vocab/merges pair to tmpdir for offline tokenizer tests."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Padding to max_length must raise: this tokenizer defines no pad token.
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_different_model_input_name(self):
        # Intentionally skipped: tokenizer has no padding token.
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Re-run the same tests with ftfy + spacy available for pre-tokenization."""

    pass
631
'''simple docstring''' from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
631
1
'''simple docstring''' import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() _A : int =logging.get_logger(__name__) def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> str: print("""Loading config file...""" ) def flatten_yaml_as_dict(UpperCamelCase , UpperCamelCase="" , UpperCamelCase="." ): lowerCamelCase__ : Optional[int] = [] for k, v in d.items(): lowerCamelCase__ : Optional[int] = parent_key + sep + k if parent_key else k if isinstance(UpperCamelCase , collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(UpperCamelCase , UpperCamelCase , sep=UpperCamelCase ).items() ) else: items.append((new_key, v) ) return dict(UpperCamelCase ) lowerCamelCase__ : Any = argparse.Namespace() with open(UpperCamelCase , """r""" ) as yaml_file: try: lowerCamelCase__ : int = yaml.load(UpperCamelCase , Loader=yaml.FullLoader ) lowerCamelCase__ : Tuple = flatten_yaml_as_dict(UpperCamelCase ) for k, v in flat_cfg.items(): setattr(UpperCamelCase , UpperCamelCase , UpperCamelCase ) except yaml.YAMLError as exc: logger.error("""Error while loading config file: {}. 
Error message: {}""".format(UpperCamelCase , str(UpperCamelCase ) ) ) return config def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Optional[Any]: lowerCamelCase__ : Union[str, Any] = MobileViTVaConfig() lowerCamelCase__ : str = False # dataset if task_name.startswith("""imagenet1k_""" ): lowerCamelCase__ : Optional[Any] = 1000 if int(task_name.strip().split("""_""" )[-1] ) == 384: lowerCamelCase__ : int = 384 else: lowerCamelCase__ : Optional[int] = 256 lowerCamelCase__ : str = """imagenet-1k-id2label.json""" elif task_name.startswith("""imagenet21k_to_1k_""" ): lowerCamelCase__ : Tuple = 21000 if int(task_name.strip().split("""_""" )[-1] ) == 384: lowerCamelCase__ : str = 384 else: lowerCamelCase__ : Any = 256 lowerCamelCase__ : int = """imagenet-22k-id2label.json""" elif task_name.startswith("""ade20k_""" ): lowerCamelCase__ : Dict = 151 lowerCamelCase__ : str = 512 lowerCamelCase__ : List[Any] = """ade20k-id2label.json""" lowerCamelCase__ : Union[str, Any] = True elif task_name.startswith("""voc_""" ): lowerCamelCase__ : Tuple = 21 lowerCamelCase__ : Optional[int] = 512 lowerCamelCase__ : List[Any] = """pascal-voc-id2label.json""" lowerCamelCase__ : Tuple = True # orig_config lowerCamelCase__ : Optional[int] = load_orig_config_file(UpperCamelCase ) assert getattr(UpperCamelCase , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model" lowerCamelCase__ : int = getattr(UpperCamelCase , """model.classification.mitv2.width_multiplier""" , 1.0 ) assert ( getattr(UpperCamelCase , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" lowerCamelCase__ : Tuple = getattr(UpperCamelCase , """model.classification.activation.name""" , """swish""" ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: lowerCamelCase__ : Any = getattr(UpperCamelCase , """model.segmentation.output_stride""" , 16 ) if "_deeplabv3" 
in task_name: lowerCamelCase__ : str = getattr(UpperCamelCase , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] ) lowerCamelCase__ : Tuple = getattr(UpperCamelCase , """model.segmentation.deeplabv3.aspp_out_channels""" , 512 ) lowerCamelCase__ : List[Any] = getattr(UpperCamelCase , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 ) # id2label lowerCamelCase__ : Tuple = """huggingface/label-files""" lowerCamelCase__ : Optional[Any] = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type="""dataset""" ) , """r""" ) ) lowerCamelCase__ : Union[str, Any] = {int(UpperCamelCase ): v for k, v in idalabel.items()} lowerCamelCase__ : int = idalabel lowerCamelCase__ : Optional[Any] = {v: k for k, v in idalabel.items()} return config def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Any: lowerCamelCase__ : List[Any] = dct.pop(UpperCamelCase ) lowerCamelCase__ : Dict = val def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase=False ) -> Tuple: if base_model: lowerCamelCase__ : Optional[int] = """""" else: lowerCamelCase__ : Optional[Any] = """mobilevitv2.""" lowerCamelCase__ : List[str] = [] for k in state_dict.keys(): if k[:8] == "encoder.": lowerCamelCase__ : Optional[Any] = k[8:] else: lowerCamelCase__ : Optional[Any] = k if ".block." in k: lowerCamelCase__ : Dict = k_new.replace(""".block.""" , """.""" ) if ".conv." in k: lowerCamelCase__ : List[Any] = k_new.replace(""".conv.""" , """.convolution.""" ) if ".norm." in k: lowerCamelCase__ : str = k_new.replace(""".norm.""" , """.normalization.""" ) if "conv_1." in k: lowerCamelCase__ : Any = k_new.replace("""conv_1.""" , f'''{model_prefix}conv_stem.''' ) for i in [1, 2]: if f'''layer_{i}.''' in k: lowerCamelCase__ : Optional[Any] = k_new.replace(f'''layer_{i}.''' , f'''{model_prefix}encoder.layer.{i-1}.layer.''' ) if ".exp_1x1." in k: lowerCamelCase__ : Dict = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" ) if ".red_1x1." 
in k: lowerCamelCase__ : str = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" ) for i in [3, 4, 5]: if f'''layer_{i}.0.''' in k: lowerCamelCase__ : List[str] = k_new.replace(f'''layer_{i}.0.''' , f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' ) if f'''layer_{i}.1.local_rep.0.''' in k: lowerCamelCase__ : Optional[Any] = k_new.replace(f'''layer_{i}.1.local_rep.0.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' ) if f'''layer_{i}.1.local_rep.1.''' in k: lowerCamelCase__ : Dict = k_new.replace(f'''layer_{i}.1.local_rep.1.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' ) for i in [3, 4, 5]: if i == 3: lowerCamelCase__ : int = [0, 1] elif i == 4: lowerCamelCase__ : str = [0, 1, 2, 3] elif i == 5: lowerCamelCase__ : Dict = [0, 1, 2] for j in j_in: if f'''layer_{i}.1.global_rep.{j}.''' in k: lowerCamelCase__ : List[Any] = k_new.replace( f'''layer_{i}.1.global_rep.{j}.''' , f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' ) if f'''layer_{i}.1.global_rep.{j+1}.''' in k: lowerCamelCase__ : Optional[int] = k_new.replace( f'''layer_{i}.1.global_rep.{j+1}.''' , f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' ) if f'''layer_{i}.1.conv_proj.''' in k: lowerCamelCase__ : Optional[int] = k_new.replace(f'''layer_{i}.1.conv_proj.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' ) if "pre_norm_attn.0." in k: lowerCamelCase__ : str = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" ) if "pre_norm_attn.1." in k: lowerCamelCase__ : str = k_new.replace("""pre_norm_attn.1.""" , """attention.""" ) if "pre_norm_ffn.0." in k: lowerCamelCase__ : Optional[Any] = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" ) if "pre_norm_ffn.1." in k: lowerCamelCase__ : Union[str, Any] = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" ) if "pre_norm_ffn.3." in k: lowerCamelCase__ : List[Any] = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" ) if "classifier.1." 
in k: lowerCamelCase__ : Tuple = k_new.replace("""classifier.1.""" , """classifier.""" ) if "seg_head." in k: lowerCamelCase__ : Optional[int] = k_new.replace("""seg_head.""" , """segmentation_head.""" ) if ".aspp_layer." in k: lowerCamelCase__ : Any = k_new.replace(""".aspp_layer.""" , """.""" ) if ".aspp_pool." in k: lowerCamelCase__ : Any = k_new.replace(""".aspp_pool.""" , """.""" ) rename_keys.append((k, k_new) ) return rename_keys def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> List[str]: lowerCamelCase__ : Union[str, Any] = [] for k in state_dict.keys(): if k.startswith("""seg_head.aux_head.""" ): keys_to_ignore.append(UpperCamelCase ) for k in keys_to_ignore: state_dict.pop(UpperCamelCase , UpperCamelCase ) def SCREAMING_SNAKE_CASE_ () -> Dict: lowerCamelCase__ : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" lowerCamelCase__ : Tuple = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw ) return im @torch.no_grad() def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict: lowerCamelCase__ : str = get_mobilevitva_config(UpperCamelCase , UpperCamelCase ) # load original state_dict lowerCamelCase__ : List[str] = torch.load(UpperCamelCase , map_location="""cpu""" ) # load huggingface model if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ): lowerCamelCase__ : int = MobileViTVaForSemanticSegmentation(UpperCamelCase ).eval() lowerCamelCase__ : Tuple = False else: lowerCamelCase__ : int = MobileViTVaForImageClassification(UpperCamelCase ).eval() lowerCamelCase__ : Optional[Any] = False # remove and rename some keys of load the original model lowerCamelCase__ : Tuple = checkpoint remove_unused_keys(UpperCamelCase ) lowerCamelCase__ : Union[str, Any] = create_rename_keys(UpperCamelCase , base_model=UpperCamelCase ) for rename_key_src, 
rename_key_dest in rename_keys: rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase ) # load modified state_dict model.load_state_dict(UpperCamelCase ) # Check outputs on an image, prepared by MobileViTImageProcessor lowerCamelCase__ : int = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) lowerCamelCase__ : Dict = image_processor(images=prepare_img() , return_tensors="""pt""" ) lowerCamelCase__ : str = model(**UpperCamelCase ) # verify classification model if task_name.startswith("""imagenet""" ): lowerCamelCase__ : Dict = outputs.logits lowerCamelCase__ : Optional[Any] = logits.argmax(-1 ).item() print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] ) if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0: # expected_logits for base variant lowerCamelCase__ : Optional[Any] = torch.tensor([-1.63_36E00, -7.32_04E-02, -5.18_83E-01] ) assert torch.allclose(logits[0, :3] , UpperCamelCase , atol=1E-4 ) Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase ) print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(UpperCamelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(UpperCamelCase ) if __name__ == "__main__": _A : Optional[int] =argparse.ArgumentParser() # Required parameters parser.add_argument( '''--task''', default='''imagenet1k_256''', type=str, help=( '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . 
''' ''' Classification (ImageNet-1k) - MobileViTV2 (256x256) : imagenet1k_256 - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384 - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) : imagenet21k_to_1k_256 - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on ImageNet-1k 384x384) : imagenet21k_to_1k_384 Segmentation - ADE20K Dataset : ade20k_deeplabv3 - Pascal VOC 2012 Dataset: voc_deeplabv3 ''' ), choices=[ '''imagenet1k_256''', '''imagenet1k_384''', '''imagenet21k_to_1k_256''', '''imagenet21k_to_1k_384''', '''ade20k_deeplabv3''', '''voc_deeplabv3''', ], ) parser.add_argument( '''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).''' ) parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) _A : Dict =parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
631
'''simple docstring''' import inspect import math import tempfile import unittest import numpy as np from transformers import ViTMAEConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMAEForPreTraining, ViTMAEModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _lowercase : def __init__( self: List[str] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[str]=13 , UpperCamelCase__: Optional[int]=30 , UpperCamelCase__: Optional[int]=2 , UpperCamelCase__: List[str]=3 , UpperCamelCase__: List[str]=True , UpperCamelCase__: Any=True , UpperCamelCase__: List[Any]=32 , UpperCamelCase__: Any=5 , UpperCamelCase__: Optional[Any]=4 , UpperCamelCase__: Dict=37 , UpperCamelCase__: List[Any]="gelu" , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: List[Any]=10 , UpperCamelCase__: Tuple=0.02 , UpperCamelCase__: Optional[int]=3 , UpperCamelCase__: Dict=0.6 , UpperCamelCase__: int=None , ): lowerCamelCase__ : Dict = parent lowerCamelCase__ : Optional[Any] = batch_size lowerCamelCase__ : Optional[int] = image_size lowerCamelCase__ : Optional[Any] = patch_size lowerCamelCase__ : Any = num_channels lowerCamelCase__ : Any = is_training lowerCamelCase__ : Union[str, Any] = use_labels lowerCamelCase__ : List[str] = hidden_size lowerCamelCase__ : List[str] = num_hidden_layers lowerCamelCase__ : List[Any] = num_attention_heads lowerCamelCase__ : str = intermediate_size lowerCamelCase__ : str = 
hidden_act lowerCamelCase__ : Any = hidden_dropout_prob lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob lowerCamelCase__ : List[Any] = type_sequence_label_size lowerCamelCase__ : int = initializer_range lowerCamelCase__ : List[str] = mask_ratio lowerCamelCase__ : List[Any] = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) lowerCamelCase__ : str = (image_size // patch_size) ** 2 lowerCamelCase__ : Optional[int] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def lowerCamelCase_ ( self: Optional[int] ): lowerCamelCase__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ : Optional[int] = None if self.use_labels: lowerCamelCase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : Any = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self: str ): return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[int] , UpperCamelCase__: int ): lowerCamelCase__ : Tuple = ViTMAEModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : List[Any] = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase_ ( self: str , UpperCamelCase__: Union[str, 
Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Dict ): lowerCamelCase__ : int = ViTMAEForPreTraining(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ ) lowerCamelCase__ : Any = (self.image_size // self.patch_size) ** 2 lowerCamelCase__ : str = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images lowerCamelCase__ : Dict = 1 lowerCamelCase__ : Optional[int] = ViTMAEForPreTraining(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ ) lowerCamelCase__ : Tuple = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : Optional[int] = self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any = config_and_inputs lowerCamelCase__ : Optional[int] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _lowercase ( _lowercase , _lowercase , unittest.TestCase ): a = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else () a = {"""feature-extraction""": ViTMAEModel} if is_torch_available() else {} a = False a = False a = False a = False def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : Tuple = ViTMAEModelTester(self ) lowerCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 ) def lowerCamelCase_ ( self: Union[str, Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" ) def lowerCamelCase_ ( self: Dict ): pass def lowerCamelCase_ ( self: List[str] ): lowerCamelCase__ , lowerCamelCase__ : str = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : str = model_class(UpperCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCamelCase__ : Optional[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) ) def lowerCamelCase_ ( self: List[Any] ): lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Any = model_class(UpperCamelCase__ ) lowerCamelCase__ : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ : Any = [*signature.parameters.keys()] lowerCamelCase__ : List[str] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Dict , UpperCamelCase__: Optional[int] ): # make masks reproducible np.random.seed(2 ) lowerCamelCase__ : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 ) lowerCamelCase__ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowerCamelCase__ : Tuple = torch.from_numpy(UpperCamelCase__ ) # Add `noise` argument. 
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument lowerCamelCase__ : Tuple = pt_noise super().check_pt_tf_models(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def lowerCamelCase_ ( self: str ): lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): lowerCamelCase__ : Optional[int] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) lowerCamelCase__ : Optional[int] = outputs[0].cpu().numpy() lowerCamelCase__ : List[str] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase__ ) lowerCamelCase__ : List[str] = model_class.from_pretrained(UpperCamelCase__ ) model.to(UpperCamelCase__ ) # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): lowerCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) # Make sure we don't have nans lowerCamelCase__ : Dict = after_outputs[0].cpu().numpy() lowerCamelCase__ : Tuple = 0 lowerCamelCase__ : Union[str, Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(UpperCamelCase__ , 1e-5 ) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def lowerCamelCase_ ( self: int ): pass @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def lowerCamelCase_ ( self: Any ): pass @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. 
See test_save_load to get deterministic results.""" ) def lowerCamelCase_ ( self: List[str] ): pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" ) def lowerCamelCase_ ( self: Tuple ): pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowerCamelCase_ ( self: Optional[int] ): pass @slow def lowerCamelCase_ ( self: List[str] ): for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : Tuple = ViTMAEModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE_ () -> List[Any]: lowerCamelCase__ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class _lowercase ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self: List[str] ): return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None @slow def lowerCamelCase_ ( self: Tuple ): # make random mask reproducible across the PT and TF model np.random.seed(2 ) lowerCamelCase__ : str = ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(UpperCamelCase__ ) lowerCamelCase__ : Tuple = self.default_image_processor lowerCamelCase__ : List[str] = prepare_img() lowerCamelCase__ : int = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) lowerCamelCase__ : List[str] = ViTMAEConfig() lowerCamelCase__ : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) lowerCamelCase__ : Any = np.random.uniform(size=(1, num_patches) ) # forward pass with torch.no_grad(): lowerCamelCase__ : List[Any] = model(**UpperCamelCase__ , noise=torch.from_numpy(UpperCamelCase__ ).to(device=UpperCamelCase__ ) 
) # verify the logits lowerCamelCase__ : List[str] = torch.Size((1, 196, 768) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) lowerCamelCase__ : str = torch.tensor( [[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(UpperCamelCase__ ) , atol=1e-4 ) )
631
1
'''simple docstring''' import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Image from .base import TaskTemplate @dataclass(frozen=_lowercase ) class _lowercase ( _lowercase ): a = field(default="""image-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} ) a = Features({"""image""": Image()} ) a = Features({"""labels""": ClassLabel} ) a = "image" a = "labels" def lowerCamelCase_ ( self: int , UpperCamelCase__: Union[str, Any] ): if self.label_column not in features: raise ValueError(F'''Column {self.label_column} is not present in features.''' ) if not isinstance(features[self.label_column] , UpperCamelCase__ ): raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' ) lowerCamelCase__ : str = copy.deepcopy(self ) lowerCamelCase__ : Optional[Any] = self.label_schema.copy() lowerCamelCase__ : Union[str, Any] = features[self.label_column] lowerCamelCase__ : Any = label_schema return task_template @property def lowerCamelCase_ ( self: List[Any] ): return { self.image_column: "image", self.label_column: "labels", }
631
'''simple docstring''' import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class _lowercase ( _lowercase ): a = """""" a = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) a = None # compression type in fsspec. ex: "gzip" a = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self: str , UpperCamelCase__: str = "" , UpperCamelCase__: Optional[str] = None , UpperCamelCase__: Optional[dict] = None , **UpperCamelCase__: List[Any] ): super().__init__(self , **UpperCamelCase__ ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode lowerCamelCase__ : List[Any] = fsspec.open( UpperCamelCase__ , mode="""rb""" , protocol=UpperCamelCase__ , compression=self.compression , client_kwargs={ """requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459 """trust_env""": True, # Enable reading proxy env variables. **(target_options or {}).pop("""client_kwargs""" , {} ), # To avoid issues if it was already passed. 
} , **(target_options or {}) , ) lowerCamelCase__ : str = os.path.basename(self.file.path.split("""::""" )[0] ) lowerCamelCase__ : Union[str, Any] = ( self.compressed_name[: self.compressed_name.rindex(""".""" )] if """.""" in self.compressed_name else self.compressed_name ) lowerCamelCase__ : Tuple = None @classmethod def lowerCamelCase_ ( cls: Optional[int] , UpperCamelCase__: Optional[int] ): # compressed file paths are always relative to the archive root return super()._strip_protocol(UpperCamelCase__ ).lstrip("""/""" ) def lowerCamelCase_ ( self: Tuple ): if self.dir_cache is None: lowerCamelCase__ : Dict = {**self.file.fs.info(self.file.path ), """name""": self.uncompressed_name} lowerCamelCase__ : int = {f["""name"""]: f} def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: str ): return self.file.open().read() def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: str , UpperCamelCase__: str = "rb" , UpperCamelCase__: Optional[int]=None , UpperCamelCase__: Tuple=True , UpperCamelCase__: Tuple=None , **UpperCamelCase__: Optional[Any] , ): lowerCamelCase__ : Union[str, Any] = self._strip_protocol(UpperCamelCase__ ) if mode != "rb": raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' ) return self.file.open() class _lowercase ( _lowercase ): a = """bz2""" a = """bz2""" a = """.bz2""" class _lowercase ( _lowercase ): a = """gzip""" a = """gzip""" a = """.gz""" class _lowercase ( _lowercase ): a = """lz4""" a = """lz4""" a = """.lz4""" class _lowercase ( _lowercase ): a = """xz""" a = """xz""" a = """.xz""" class _lowercase ( _lowercase ): a = """zstd""" a = """zstd""" a = """.zst""" def __init__( self: int , UpperCamelCase__: str , UpperCamelCase__: str = "rb" , UpperCamelCase__: Optional[str] = None , UpperCamelCase__: Optional[dict] = None , UpperCamelCase__: int = DEFAULT_BLOCK_SIZE , **UpperCamelCase__: Dict , ): super().__init__( fo=UpperCamelCase__ , mode=UpperCamelCase__ , 
target_protocol=UpperCamelCase__ , target_options=UpperCamelCase__ , block_size=UpperCamelCase__ , **UpperCamelCase__ , ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 lowerCamelCase__ : Tuple = self.file.__enter__ class _lowercase : def __init__( self: Optional[int] , UpperCamelCase__: Any ): lowerCamelCase__ : Optional[int] = file_ def __enter__( self: List[Any] ): self._file.__enter__() return self def __exit__( self: Any , *UpperCamelCase__: str , **UpperCamelCase__: Any ): self._file.__exit__(*UpperCamelCase__ , **UpperCamelCase__ ) def __iter__( self: Any ): return iter(self._file ) def lowerCamelCase_ ( self: List[Any] ): return next(self._file ) def __getattr__( self: List[str] , UpperCamelCase__: Dict ): return getattr(self._file , UpperCamelCase__ ) def fixed_enter(*UpperCamelCase__: Union[str, Any] , **UpperCamelCase__: List[str] ): return WrappedFile(_enter(*UpperCamelCase__ , **UpperCamelCase__ ) ) lowerCamelCase__ : Optional[Any] = fixed_enter
631
1
'''simple docstring''' import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class _lowercase : def __init__( self: Dict , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[Any]=13 , UpperCamelCase__: Tuple=32 , UpperCamelCase__: Optional[int]=2 , UpperCamelCase__: int=3 , UpperCamelCase__: Dict=16 , UpperCamelCase__: str=[1, 2, 1] , UpperCamelCase__: Tuple=[2, 2, 4] , UpperCamelCase__: Dict=2 , UpperCamelCase__: List[str]=2.0 , UpperCamelCase__: List[Any]=True , UpperCamelCase__: List[str]=0.0 , UpperCamelCase__: Tuple=0.0 , UpperCamelCase__: str=0.1 , UpperCamelCase__: List[Any]="gelu" , UpperCamelCase__: Any=False , UpperCamelCase__: str=True , UpperCamelCase__: Optional[int]=0.02 , UpperCamelCase__: List[Any]=1e-5 , UpperCamelCase__: Any=True , UpperCamelCase__: str=None , UpperCamelCase__: List[str]=True , UpperCamelCase__: Any=10 , UpperCamelCase__: Tuple=8 , UpperCamelCase__: Optional[int]=["stage1", "stage2", "stage3"] , UpperCamelCase__: Dict=[1, 2, 3] , ): lowerCamelCase__ : Any = parent lowerCamelCase__ : Dict = batch_size lowerCamelCase__ : List[str] = image_size lowerCamelCase__ : Dict = patch_size lowerCamelCase__ : Optional[int] = num_channels lowerCamelCase__ : Any = embed_dim lowerCamelCase__ : Optional[int] = depths lowerCamelCase__ : Union[str, Any] = num_heads lowerCamelCase__ : Optional[int] = window_size 
lowerCamelCase__ : str = mlp_ratio lowerCamelCase__ : List[Any] = qkv_bias lowerCamelCase__ : Tuple = hidden_dropout_prob lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob lowerCamelCase__ : Any = drop_path_rate lowerCamelCase__ : Optional[int] = hidden_act lowerCamelCase__ : Union[str, Any] = use_absolute_embeddings lowerCamelCase__ : str = patch_norm lowerCamelCase__ : List[str] = layer_norm_eps lowerCamelCase__ : Any = initializer_range lowerCamelCase__ : List[str] = is_training lowerCamelCase__ : List[Any] = scope lowerCamelCase__ : List[str] = use_labels lowerCamelCase__ : List[Any] = type_sequence_label_size lowerCamelCase__ : Dict = encoder_stride lowerCamelCase__ : Union[str, Any] = out_features lowerCamelCase__ : Union[str, Any] = out_indices def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ : Tuple = None if self.use_labels: lowerCamelCase__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : Union[str, Any] = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self: Any ): return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: List[Any] , 
UpperCamelCase__: Optional[Any] , UpperCamelCase__: Any ): lowerCamelCase__ : int = MaskFormerSwinModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : Tuple = model(UpperCamelCase__ ) lowerCamelCase__ : Optional[int] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowerCamelCase__ : Optional[int] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def lowerCamelCase_ ( self: str , UpperCamelCase__: str , UpperCamelCase__: str , UpperCamelCase__: Union[str, Any] ): lowerCamelCase__ : Union[str, Any] = MaskFormerSwinBackbone(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : Dict = model(UpperCamelCase__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(UpperCamelCase__ ): lowerCamelCase__ : Dict = ["""stem"""] lowerCamelCase__ : Union[str, Any] = MaskFormerSwinBackbone(config=UpperCamelCase__ ) def lowerCamelCase_ ( self: Dict ): lowerCamelCase__ : str = self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] = config_and_inputs lowerCamelCase__ : Dict = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _lowercase ( _lowercase , _lowercase , unittest.TestCase ): a = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) a = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {} a = False a = False a = False a = False a = False def lowerCamelCase_ ( self: List[Any] ): 
lowerCamelCase__ : str = MaskFormerSwinModelTester(self ) lowerCamelCase__ : List[str] = ConfigTester(self , config_class=UpperCamelCase__ , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( """`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with""" """ `nn.DataParallel`""" ) ) def lowerCamelCase_ ( self: Union[str, Any] ): pass def lowerCamelCase_ ( self: Union[str, Any] ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase_ ( self: Tuple ): return def lowerCamelCase_ ( self: Optional[Any] ): lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*UpperCamelCase__ ) @unittest.skip("""Swin does not use inputs_embeds""" ) def lowerCamelCase_ ( self: Union[str, Any] ): pass @unittest.skip("""Swin does not support feedforward chunking""" ) def lowerCamelCase_ ( self: Optional[Any] ): pass def lowerCamelCase_ ( self: Optional[Any] ): lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCamelCase__ : str = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) ) def lowerCamelCase_ ( self: int ): lowerCamelCase__ , 
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : str = model_class(UpperCamelCase__ ) lowerCamelCase__ : List[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ : Dict = [*signature.parameters.keys()] lowerCamelCase__ : List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) @unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" ) def lowerCamelCase_ ( self: Any ): pass @unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" ) def lowerCamelCase_ ( self: Optional[Any] ): pass def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Tuple , UpperCamelCase__: Tuple ): lowerCamelCase__ : List[str] = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): lowerCamelCase__ : List[Any] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) lowerCamelCase__ : Union[str, Any] = outputs.hidden_states lowerCamelCase__ : Union[str, Any] = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) # Swin has a different seq_length lowerCamelCase__ : int = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCamelCase__ : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ : 
Optional[int] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: lowerCamelCase__ : Any = True self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase__ : Any = True self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def lowerCamelCase_ ( self: Optional[Any] ): lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ : Tuple = 3 lowerCamelCase__ : Optional[int] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowerCamelCase__ : Optional[int] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCamelCase__ : List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowerCamelCase__ : int = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: lowerCamelCase__ : Dict = True self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase__ : Dict = True self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) ) @unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" ) def lowerCamelCase_ ( self: List[str] ): pass @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" ) 
def lowerCamelCase_ ( self: Any ): pass @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" ) def lowerCamelCase_ ( self: Any ): pass def lowerCamelCase_ ( self: int ): lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(UpperCamelCase__: int ): lowerCamelCase__ : str = 0 return t def check_equivalence(UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[str]={} ): with torch.no_grad(): lowerCamelCase__ : List[Any] = model(**UpperCamelCase__ , return_dict=UpperCamelCase__ , **UpperCamelCase__ ) lowerCamelCase__ : Any = model(**UpperCamelCase__ , return_dict=UpperCamelCase__ , **UpperCamelCase__ ).to_tuple() def recursive_check(UpperCamelCase__: Optional[Any] , UpperCamelCase__: Optional[Any] ): if isinstance(UpperCamelCase__ , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(UpperCamelCase__ , UpperCamelCase__ ): recursive_check(UpperCamelCase__ , UpperCamelCase__ ) elif isinstance(UpperCamelCase__ , UpperCamelCase__ ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(UpperCamelCase__ , UpperCamelCase__ ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(UpperCamelCase__ ) , set_nan_tensor_to_zero(UpperCamelCase__ ) , atol=1e-5 ) , msg=( """Tuple and dict output are not equal. Difference:""" F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:''' F''' {torch.isnan(UpperCamelCase__ ).any()} and `inf`: {torch.isinf(UpperCamelCase__ )}. 
Dict has''' F''' `nan`: {torch.isnan(UpperCamelCase__ ).any()} and `inf`: {torch.isinf(UpperCamelCase__ )}.''' ) , ) recursive_check(UpperCamelCase__ , UpperCamelCase__ ) for model_class in self.all_model_classes: lowerCamelCase__ : Tuple = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : Optional[int] = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : int = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : str = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) lowerCamelCase__ : Optional[int] = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : str = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , {"""output_hidden_states""": True} ) lowerCamelCase__ : List[str] = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) lowerCamelCase__ : int = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , {"""output_hidden_states""": True} ) @require_torch class _lowercase ( unittest.TestCase , _lowercase ): a = (MaskFormerSwinBackbone,) if is_torch_available() else () a = MaskFormerSwinConfig def lowerCamelCase_ ( self: Optional[Any] ): lowerCamelCase__ : str = MaskFormerSwinModelTester(self ) def lowerCamelCase_ ( self: Any ): lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ : Dict = 
inputs_dict["""pixel_values"""].shape[0] for backbone_class in self.all_model_classes: lowerCamelCase__ : Union[str, Any] = backbone_class(UpperCamelCase__ ) backbone.to(UpperCamelCase__ ) backbone.eval() lowerCamelCase__ : str = backbone(**UpperCamelCase__ ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , UpperCamelCase__ ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True lowerCamelCase__ : List[str] = backbone(**UpperCamelCase__ , output_hidden_states=UpperCamelCase__ ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: lowerCamelCase__ : List[Any] = backbone(**UpperCamelCase__ , output_attentions=UpperCamelCase__ ) self.assertIsNotNone(outputs.attentions )
631
'''simple docstring''' import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() _A : int =logging.get_logger(__name__) def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> str: print("""Loading config file...""" ) def flatten_yaml_as_dict(UpperCamelCase , UpperCamelCase="" , UpperCamelCase="." ): lowerCamelCase__ : Optional[int] = [] for k, v in d.items(): lowerCamelCase__ : Optional[int] = parent_key + sep + k if parent_key else k if isinstance(UpperCamelCase , collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(UpperCamelCase , UpperCamelCase , sep=UpperCamelCase ).items() ) else: items.append((new_key, v) ) return dict(UpperCamelCase ) lowerCamelCase__ : Any = argparse.Namespace() with open(UpperCamelCase , """r""" ) as yaml_file: try: lowerCamelCase__ : int = yaml.load(UpperCamelCase , Loader=yaml.FullLoader ) lowerCamelCase__ : Tuple = flatten_yaml_as_dict(UpperCamelCase ) for k, v in flat_cfg.items(): setattr(UpperCamelCase , UpperCamelCase , UpperCamelCase ) except yaml.YAMLError as exc: logger.error("""Error while loading config file: {}. 
Error message: {}""".format(UpperCamelCase , str(UpperCamelCase ) ) ) return config def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Optional[Any]: lowerCamelCase__ : Union[str, Any] = MobileViTVaConfig() lowerCamelCase__ : str = False # dataset if task_name.startswith("""imagenet1k_""" ): lowerCamelCase__ : Optional[Any] = 1000 if int(task_name.strip().split("""_""" )[-1] ) == 384: lowerCamelCase__ : int = 384 else: lowerCamelCase__ : Optional[int] = 256 lowerCamelCase__ : str = """imagenet-1k-id2label.json""" elif task_name.startswith("""imagenet21k_to_1k_""" ): lowerCamelCase__ : Tuple = 21000 if int(task_name.strip().split("""_""" )[-1] ) == 384: lowerCamelCase__ : str = 384 else: lowerCamelCase__ : Any = 256 lowerCamelCase__ : int = """imagenet-22k-id2label.json""" elif task_name.startswith("""ade20k_""" ): lowerCamelCase__ : Dict = 151 lowerCamelCase__ : str = 512 lowerCamelCase__ : List[Any] = """ade20k-id2label.json""" lowerCamelCase__ : Union[str, Any] = True elif task_name.startswith("""voc_""" ): lowerCamelCase__ : Tuple = 21 lowerCamelCase__ : Optional[int] = 512 lowerCamelCase__ : List[Any] = """pascal-voc-id2label.json""" lowerCamelCase__ : Tuple = True # orig_config lowerCamelCase__ : Optional[int] = load_orig_config_file(UpperCamelCase ) assert getattr(UpperCamelCase , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model" lowerCamelCase__ : int = getattr(UpperCamelCase , """model.classification.mitv2.width_multiplier""" , 1.0 ) assert ( getattr(UpperCamelCase , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" lowerCamelCase__ : Tuple = getattr(UpperCamelCase , """model.classification.activation.name""" , """swish""" ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: lowerCamelCase__ : Any = getattr(UpperCamelCase , """model.segmentation.output_stride""" , 16 ) if "_deeplabv3" 
in task_name: lowerCamelCase__ : str = getattr(UpperCamelCase , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] ) lowerCamelCase__ : Tuple = getattr(UpperCamelCase , """model.segmentation.deeplabv3.aspp_out_channels""" , 512 ) lowerCamelCase__ : List[Any] = getattr(UpperCamelCase , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 ) # id2label lowerCamelCase__ : Tuple = """huggingface/label-files""" lowerCamelCase__ : Optional[Any] = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type="""dataset""" ) , """r""" ) ) lowerCamelCase__ : Union[str, Any] = {int(UpperCamelCase ): v for k, v in idalabel.items()} lowerCamelCase__ : int = idalabel lowerCamelCase__ : Optional[Any] = {v: k for k, v in idalabel.items()} return config def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Any: lowerCamelCase__ : List[Any] = dct.pop(UpperCamelCase ) lowerCamelCase__ : Dict = val def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase=False ) -> Tuple: if base_model: lowerCamelCase__ : Optional[int] = """""" else: lowerCamelCase__ : Optional[Any] = """mobilevitv2.""" lowerCamelCase__ : List[str] = [] for k in state_dict.keys(): if k[:8] == "encoder.": lowerCamelCase__ : Optional[Any] = k[8:] else: lowerCamelCase__ : Optional[Any] = k if ".block." in k: lowerCamelCase__ : Dict = k_new.replace(""".block.""" , """.""" ) if ".conv." in k: lowerCamelCase__ : List[Any] = k_new.replace(""".conv.""" , """.convolution.""" ) if ".norm." in k: lowerCamelCase__ : str = k_new.replace(""".norm.""" , """.normalization.""" ) if "conv_1." in k: lowerCamelCase__ : Any = k_new.replace("""conv_1.""" , f'''{model_prefix}conv_stem.''' ) for i in [1, 2]: if f'''layer_{i}.''' in k: lowerCamelCase__ : Optional[Any] = k_new.replace(f'''layer_{i}.''' , f'''{model_prefix}encoder.layer.{i-1}.layer.''' ) if ".exp_1x1." in k: lowerCamelCase__ : Dict = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" ) if ".red_1x1." 
in k: lowerCamelCase__ : str = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" ) for i in [3, 4, 5]: if f'''layer_{i}.0.''' in k: lowerCamelCase__ : List[str] = k_new.replace(f'''layer_{i}.0.''' , f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' ) if f'''layer_{i}.1.local_rep.0.''' in k: lowerCamelCase__ : Optional[Any] = k_new.replace(f'''layer_{i}.1.local_rep.0.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' ) if f'''layer_{i}.1.local_rep.1.''' in k: lowerCamelCase__ : Dict = k_new.replace(f'''layer_{i}.1.local_rep.1.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' ) for i in [3, 4, 5]: if i == 3: lowerCamelCase__ : int = [0, 1] elif i == 4: lowerCamelCase__ : str = [0, 1, 2, 3] elif i == 5: lowerCamelCase__ : Dict = [0, 1, 2] for j in j_in: if f'''layer_{i}.1.global_rep.{j}.''' in k: lowerCamelCase__ : List[Any] = k_new.replace( f'''layer_{i}.1.global_rep.{j}.''' , f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' ) if f'''layer_{i}.1.global_rep.{j+1}.''' in k: lowerCamelCase__ : Optional[int] = k_new.replace( f'''layer_{i}.1.global_rep.{j+1}.''' , f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' ) if f'''layer_{i}.1.conv_proj.''' in k: lowerCamelCase__ : Optional[int] = k_new.replace(f'''layer_{i}.1.conv_proj.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' ) if "pre_norm_attn.0." in k: lowerCamelCase__ : str = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" ) if "pre_norm_attn.1." in k: lowerCamelCase__ : str = k_new.replace("""pre_norm_attn.1.""" , """attention.""" ) if "pre_norm_ffn.0." in k: lowerCamelCase__ : Optional[Any] = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" ) if "pre_norm_ffn.1." in k: lowerCamelCase__ : Union[str, Any] = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" ) if "pre_norm_ffn.3." in k: lowerCamelCase__ : List[Any] = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" ) if "classifier.1." 
in k: lowerCamelCase__ : Tuple = k_new.replace("""classifier.1.""" , """classifier.""" ) if "seg_head." in k: lowerCamelCase__ : Optional[int] = k_new.replace("""seg_head.""" , """segmentation_head.""" ) if ".aspp_layer." in k: lowerCamelCase__ : Any = k_new.replace(""".aspp_layer.""" , """.""" ) if ".aspp_pool." in k: lowerCamelCase__ : Any = k_new.replace(""".aspp_pool.""" , """.""" ) rename_keys.append((k, k_new) ) return rename_keys def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> List[str]: lowerCamelCase__ : Union[str, Any] = [] for k in state_dict.keys(): if k.startswith("""seg_head.aux_head.""" ): keys_to_ignore.append(UpperCamelCase ) for k in keys_to_ignore: state_dict.pop(UpperCamelCase , UpperCamelCase ) def SCREAMING_SNAKE_CASE_ () -> Dict: lowerCamelCase__ : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" lowerCamelCase__ : Tuple = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw ) return im @torch.no_grad() def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict: lowerCamelCase__ : str = get_mobilevitva_config(UpperCamelCase , UpperCamelCase ) # load original state_dict lowerCamelCase__ : List[str] = torch.load(UpperCamelCase , map_location="""cpu""" ) # load huggingface model if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ): lowerCamelCase__ : int = MobileViTVaForSemanticSegmentation(UpperCamelCase ).eval() lowerCamelCase__ : Tuple = False else: lowerCamelCase__ : int = MobileViTVaForImageClassification(UpperCamelCase ).eval() lowerCamelCase__ : Optional[Any] = False # remove and rename some keys of load the original model lowerCamelCase__ : Tuple = checkpoint remove_unused_keys(UpperCamelCase ) lowerCamelCase__ : Union[str, Any] = create_rename_keys(UpperCamelCase , base_model=UpperCamelCase ) for rename_key_src, 
rename_key_dest in rename_keys: rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase ) # load modified state_dict model.load_state_dict(UpperCamelCase ) # Check outputs on an image, prepared by MobileViTImageProcessor lowerCamelCase__ : int = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) lowerCamelCase__ : Dict = image_processor(images=prepare_img() , return_tensors="""pt""" ) lowerCamelCase__ : str = model(**UpperCamelCase ) # verify classification model if task_name.startswith("""imagenet""" ): lowerCamelCase__ : Dict = outputs.logits lowerCamelCase__ : Optional[Any] = logits.argmax(-1 ).item() print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] ) if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0: # expected_logits for base variant lowerCamelCase__ : Optional[Any] = torch.tensor([-1.63_36E00, -7.32_04E-02, -5.18_83E-01] ) assert torch.allclose(logits[0, :3] , UpperCamelCase , atol=1E-4 ) Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase ) print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(UpperCamelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(UpperCamelCase ) if __name__ == "__main__": _A : Optional[int] =argparse.ArgumentParser() # Required parameters parser.add_argument( '''--task''', default='''imagenet1k_256''', type=str, help=( '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . 
''' ''' Classification (ImageNet-1k) - MobileViTV2 (256x256) : imagenet1k_256 - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384 - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) : imagenet21k_to_1k_256 - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on ImageNet-1k 384x384) : imagenet21k_to_1k_384 Segmentation - ADE20K Dataset : ade20k_deeplabv3 - Pascal VOC 2012 Dataset: voc_deeplabv3 ''' ), choices=[ '''imagenet1k_256''', '''imagenet1k_384''', '''imagenet21k_to_1k_256''', '''imagenet21k_to_1k_384''', '''ade20k_deeplabv3''', '''voc_deeplabv3''', ], ) parser.add_argument( '''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).''' ) parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) _A : Dict =parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
631
1
'''simple docstring'''
# NOTE(review): scrapes a public Instagram profile page and exposes profile fields
# as read-only properties.  Identifier names are machine-mangled (``_lowercase``
# was presumably ``InstagramUser``; the module functions were
# ``extract_user_profile`` and a test helper): many statements assign
# ``lowerCamelCase__`` while later code reads the pre-mangling names (``data``,
# ``info``, ``scripts``, ``username``, ``instagram_user``), so the module raises
# ``NameError`` if executed as written.  Comments describe the intended
# (pre-mangling) behaviour — confirm against the upstream file.
from __future__ import annotations

import json

import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent

# Random browser-like User-Agent header used for every request.
_A : Tuple = {'''UserAgent''': UserAgent().random}


def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> dict:
    """Pull the profile JSON out of one inline ``<script>`` tag.

    Locates the ``{"config"`` payload embedded in the script body, parses it and
    returns the ``user`` sub-dict of the ProfilePage GraphQL blob.
    """
    # NOTE(review): ``script``/``data``/``info`` are pre-mangling names; the
    # actual parameter arrives as ``UpperCamelCase``.
    lowerCamelCase__ : List[Any] = script.contents[0]
    lowerCamelCase__ : str = json.loads(data[data.find("""{\"config\"""" ) : -1] )
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class _lowercase :
    # One scraped Instagram profile (original name presumably ``InstagramUser``).

    def __init__( self: Optional[Any] , UpperCamelCase__: Union[str, Any] ):
        # Build the profile URL from the handle and eagerly fetch the JSON payload.
        lowerCamelCase__ : List[str] = F'''https://www.instagram.com/{username}/'''
        lowerCamelCase__ : Dict = self.get_json()

    def lowerCamelCase_ ( self: List[Any] ):
        # Fetch the profile page and parse the user dict out of its <script> tags.
        # The payload is not always in the same script slot, hence the
        # index-4-then-index-3 fallback when JSON parsing or key lookup fails.
        lowerCamelCase__ : Optional[Any] = requests.get(self.url , headers=UpperCamelCase__ ).text
        lowerCamelCase__ : Tuple = BeautifulSoup(UpperCamelCase__ , """html.parser""" ).find_all("""script""" )
        try:
            return extract_user_profile(scripts[4] )
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3] )

    def __repr__( self: Union[str, Any] ):
        # e.g. InstagramUser('github')
        return F'''{self.__class__.__name__}(\'{self.username}\')'''

    def __str__( self: List[str] ):
        return F'''{self.fullname} ({self.username}) is {self.biography}'''

    @property
    def lowerCamelCase_ ( self: Tuple ):
        # Profile handle (original property name: ``username``).
        return self.user_data["username"]

    @property
    def lowerCamelCase_ ( self: Any ):
        # Display name (``fullname``).
        return self.user_data["full_name"]

    @property
    def lowerCamelCase_ ( self: Tuple ):
        # Bio text (``biography``).
        return self.user_data["biography"]

    @property
    def lowerCamelCase_ ( self: str ):
        # Public business e-mail (``email``).
        return self.user_data["business_email"]

    @property
    def lowerCamelCase_ ( self: int ):
        # External link shown on the profile (``website``).
        return self.user_data["external_url"]

    @property
    def lowerCamelCase_ ( self: Tuple ):
        # Follower count (``number_of_followers``).
        return self.user_data["edge_followed_by"]["count"]

    @property
    def lowerCamelCase_ ( self: List[Any] ):
        # Following count (``number_of_followings``).
        return self.user_data["edge_follow"]["count"]

    @property
    def lowerCamelCase_ ( self: Tuple ):
        # Post count (``number_of_posts``).
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def lowerCamelCase_ ( self: Optional[int] ):
        # High-resolution avatar URL (``profile_picture_url``).
        return self.user_data["profile_pic_url_hd"]

    @property
    def lowerCamelCase_ ( self: Optional[Any] ):
        # Blue-check status (``is_verified``).
        return self.user_data["is_verified"]

    @property
    def lowerCamelCase_ ( self: Tuple ):
        # Private-account flag (``is_private``).
        return self.user_data["is_private"]


def SCREAMING_SNAKE_CASE_ (UpperCamelCase = "github" ) -> None:
    """Smoke-test the scraper against the well-known ``github`` account."""
    import os

    if os.environ.get("""CI""" ):
        return  # test failing on GitHub Actions
    lowerCamelCase__ : int = InstagramUser(UpperCamelCase )
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data , UpperCamelCase )
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("""https://instagram.""" )
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    _A : Dict = InstagramUser('''github''')
    print(instagram_user)
    print(F'{instagram_user.number_of_posts = }')
    print(F'{instagram_user.number_of_followers = }')
    print(F'{instagram_user.number_of_followings = }')
    print(F'{instagram_user.email = }')
    print(F'{instagram_user.website = }')
    print(F'{instagram_user.profile_picture_url = }')
    print(F'{instagram_user.is_verified = }')
    print(F'{instagram_user.is_private = }')
631
# NOTE(review): unit/integration test module for MobileViTV2: a config tester, a
# model tester building a tiny model, the common ModelTesterMixin suite, and slow
# integration tests for image classification and semantic segmentation.  The
# original physical line structure was collapsed into a few very long lines,
# preserved verbatim below.  Identifiers are machine-mangled (``_lowercase``,
# ``lowerCamelCase__`` assignments whose later readers use pre-mangling names
# such as ``parent``, ``batch_size``, ``config_and_inputs``), so the module is
# not runnable as written — confirm behaviour against the upstream file.
# --- imports and availability guards; MobileViTVaConfigTester (checks the
# ``width_multiplier`` config attribute); start of the model tester ``__init__``
# with tiny-model hyper-parameters ---
'''simple docstring''' import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class _lowercase ( _lowercase ): def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : Dict = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(UpperCamelCase__ , """width_multiplier""" ) ) class _lowercase : def __init__( self: str , UpperCamelCase__: Optional[int] , UpperCamelCase__: str=13 , UpperCamelCase__: Any=64 , UpperCamelCase__: Optional[Any]=2 , UpperCamelCase__: str=3 , UpperCamelCase__: List[str]="swish" , UpperCamelCase__: Any=3 , UpperCamelCase__: Optional[int]=32 , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: int=0.02 , UpperCamelCase__: Dict=True , UpperCamelCase__: Dict=True , UpperCamelCase__: Any=10 , UpperCamelCase__: int=None , UpperCamelCase__: List[Any]=0.25 , UpperCamelCase__: str=0.0 , UpperCamelCase__: Optional[int]=0.0 , ): lowerCamelCase__ : Any = parent lowerCamelCase__ : Optional[Any] = batch_size lowerCamelCase__ : Optional[int] = image_size lowerCamelCase__ : str = patch_size lowerCamelCase__ : Optional[int] = num_channels lowerCamelCase__ : Optional[Any] = make_divisible(512 * width_multiplier , divisor=8 ) lowerCamelCase__ : List[str] = hidden_act
# --- remaining ``__init__`` attribute assignments; ``prepare_config_and_inputs``
# (random pixel_values and labels); ``get_config``; ``create_and_check_model``
# (verifies the backbone's last_hidden_state shape) ---
lowerCamelCase__ : Any = conv_kernel_size lowerCamelCase__ : Any = output_stride lowerCamelCase__ : Union[str, Any] = classifier_dropout_prob lowerCamelCase__ : List[str] = use_labels lowerCamelCase__ : Optional[Any] = is_training lowerCamelCase__ : List[str] = num_labels lowerCamelCase__ : Dict = initializer_range lowerCamelCase__ : List[Any] = scope lowerCamelCase__ : Tuple = width_multiplier lowerCamelCase__ : List[Any] = ffn_dropout lowerCamelCase__ : Any = attn_dropout def lowerCamelCase_ ( self: Dict ): lowerCamelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ : Tuple = None lowerCamelCase__ : Optional[Any] = None if self.use_labels: lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_labels ) lowerCamelCase__ : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) lowerCamelCase__ : Union[str, Any] = self.get_config() return config, pixel_values, labels, pixel_labels def lowerCamelCase_ ( self: List[Any] ): return MobileViTVaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , ) def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: int , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] ): lowerCamelCase__ : Union[str, Any] = MobileViTVaModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : str = model(UpperCamelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size //
# --- end of ``create_and_check_model``; ``create_and_check_for_image_classification``
# (logits shape) and ``create_and_check_for_semantic_segmentation`` (logits shape
# with and without labels); ``prepare_config_and_inputs_for_common``; start of the
# common test-case class with its model / pipeline class tuples ---
self.output_stride, ) , ) def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Tuple ): lowerCamelCase__ : Tuple = self.num_labels lowerCamelCase__ : Dict = MobileViTVaForImageClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : int = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Any , UpperCamelCase__: Optional[Any] , UpperCamelCase__: str ): lowerCamelCase__ : List[str] = self.num_labels lowerCamelCase__ : Union[str, Any] = MobileViTVaForSemanticSegmentation(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : Tuple = model(UpperCamelCase__ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ : Any = self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = config_and_inputs lowerCamelCase__ : Optional[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _lowercase ( _lowercase , _lowercase , unittest.TestCase ): a = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) a = ( { """feature-extraction""": MobileViTVaModel, """image-classification""": MobileViTVaForImageClassification, """image-segmentation""":
# --- pipeline mapping tail and common-suite flags; ``setUp``; skip-decorated
# placeholders (no inputs_embeds / attentions / multi-GPU); forward-signature
# test; model test; start of the hidden-states output test ---
MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) a = False a = False a = False a = False def lowerCamelCase_ ( self: Optional[int] ): lowerCamelCase__ : Tuple = MobileViTVaModelTester(self ) lowerCamelCase__ : List[str] = MobileViTVaConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ ) def lowerCamelCase_ ( self: Tuple ): self.config_tester.run_common_tests() @unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" ) def lowerCamelCase_ ( self: int ): pass @unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" ) def lowerCamelCase_ ( self: List[str] ): pass @unittest.skip(reason="""MobileViTV2 does not output attentions""" ) def lowerCamelCase_ ( self: Union[str, Any] ): pass @require_torch_multi_gpu @unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" ) def lowerCamelCase_ ( self: int ): pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowerCamelCase_ ( self: Tuple ): pass def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ ) lowerCamelCase__ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ : Tuple = [*signature.parameters.keys()] lowerCamelCase__ : str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def lowerCamelCase_ ( self: str ): lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def lowerCamelCase_ ( self: List[str] ): def check_hidden_states_output(UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any] ): lowerCamelCase__ :
# --- hidden-states check (5 feature maps, spatial dims halved per stage);
# classification / segmentation create-and-check tests; slow ``from_pretrained``
# smoke test; start of the ``prepare_img`` helper ---
List[Any] = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): lowerCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) lowerCamelCase__ : Optional[int] = outputs.hidden_states lowerCamelCase__ : List[Any] = 5 self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. lowerCamelCase__ : int = 2 for i in range(len(UpperCamelCase__ ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) lowerCamelCase__ , lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : int = True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase__ : str = True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Dict ): lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase__ ) @slow def lowerCamelCase_ ( self: Union[str, Any] ): for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : Union[str, Any] = MobileViTVaModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE_ () -> Optional[int]: lowerCamelCase__ : str =
# --- ``prepare_img`` (loads the COCO cats fixture); slow integration tests:
# image-classification logits against hard-coded expected values, and
# semantic-segmentation logits shape/values for the VOC DeepLabV3 checkpoint ---
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class _lowercase ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self: Tuple ): return ( MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ) if is_vision_available() else None ) @slow def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : Optional[Any] = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to( UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = self.default_image_processor lowerCamelCase__ : List[Any] = prepare_img() lowerCamelCase__ : Any = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): lowerCamelCase__ : int = model(**UpperCamelCase__ ) # verify the logits lowerCamelCase__ : str = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) lowerCamelCase__ : Optional[int] = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) ) @slow def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : int = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) lowerCamelCase__ : Optional[Any] = model.to(UpperCamelCase__ ) lowerCamelCase__ : Any = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) lowerCamelCase__ : Union[str, Any] = prepare_img() lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): lowerCamelCase__ : Optional[Any] = model(**UpperCamelCase__ ) lowerCamelCase__ : str = outputs.logits # verify the logits lowerCamelCase__ : List[str] = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , UpperCamelCase__ ) lowerCamelCase__ : Any =
# --- expected 3x3x3 segmentation-logit slice; slow post-processing test
# (``post_process_semantic_segmentation`` with and without ``target_sizes``) ---
torch.tensor( [ [[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]], [[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]], [[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]], ] , device=UpperCamelCase__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1e-4 ) ) @slow def lowerCamelCase_ ( self: Optional[int] ): lowerCamelCase__ : Optional[Any] = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) lowerCamelCase__ : List[Any] = model.to(UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) lowerCamelCase__ : Optional[Any] = prepare_img() lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): lowerCamelCase__ : Dict = model(**UpperCamelCase__ ) lowerCamelCase__ : List[str] = outputs.logits.detach().cpu() lowerCamelCase__ : List[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(50, 60)] ) lowerCamelCase__ : int = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , UpperCamelCase__ ) lowerCamelCase__ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ ) lowerCamelCase__ : int = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
631
1
'''simple docstring'''


def SCREAMING_SNAKE_CASE_ (UpperCamelCase = 1000000 ) -> int:
    """Solve Project Euler problem 135.

    Count the values ``n`` in ``1..UpperCamelCase`` for which the equation
    ``x**2 - y**2 - z**2 = n`` has exactly ten solutions in positive integers
    ``x > y > z`` in arithmetic progression.

    Writing ``y = a`` and common difference ``d`` (so ``x = a + d``,
    ``z = a - d``) gives ``n = a * (4 * d - a)``.  For every divisor pair
    ``n = a * q`` we therefore need ``q = 4 * d - a``, i.e. ``a + q`` must be
    divisible by 4, and positivity of ``x, y, z`` requires ``d < a < 4 * d``.

    The original (name-mangled) body referenced undefined names (``limit``,
    ``frequency``, ``common_difference``, ``count``) and so raised
    ``NameError``; this version restores working locals and uses exact integer
    arithmetic instead of float division.

    :param UpperCamelCase: search bound; ``n`` ranges over ``1..UpperCamelCase``.
    :return: the number of ``n`` with exactly ten solutions.
    """
    limit = UpperCamelCase + 1
    # frequency[n] accumulates the number of solutions found for each n.
    frequency = [0] * limit
    for first_term in range(1, limit):
        # Every multiple n of first_term yields the divisor pair
        # (first_term, n // first_term), with a = first_term.
        for n in range(first_term, limit, first_term):
            # 4 * d = a + n // a must divide evenly for d to be an integer.
            common_difference, remainder = divmod(first_term + n // first_term, 4)
            # Positivity of x, y, z: d < a < 4 * d.
            if remainder == 0 and common_difference < first_term < 4 * common_difference:
                frequency[n] += 1
    return sum(1 for occurrences in frequency[1:limit] if occurrences == 10)


if __name__ == "__main__":
    print(F'{SCREAMING_SNAKE_CASE_() = }')
631
'''simple docstring'''
# NOTE(review): fast (Rust-backed) tokenizer for GPT-NeoX-20B (original class
# name presumably ``GPTNeoXTokenizerFast``).  Identifiers are machine-mangled:
# several statements assign ``lowerCamelCase__`` while later lines read the
# pre-mangling names (``pre_tok_state``, ``add_prefix_space``, ``pre_tok_class``,
# ``input_ids``, ``conversation``), so the module is not runnable as written;
# comments describe the intended behaviour — confirm against the upstream file.
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

_A : Optional[Any] = logging.get_logger(__name__)

# File names expected inside a saved tokenizer directory.
_A : Dict = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}

# Hub URLs of the pretrained tokenizer files.
_A : Tuple = {
    '''tokenizer_file''': {
        '''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
    },
}

# Maximum model input length (in tokens) per checkpoint.
_A : List[Any] = {
    '''gpt-neox-20b''': 2_048,
}


class _lowercase ( _lowercase ):
    # Class-level registry hooks consumed by the PreTrainedTokenizerFast base class.
    a = VOCAB_FILES_NAMES
    a = PRETRAINED_VOCAB_FILES_MAP
    a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    a = ["""input_ids""", """attention_mask"""]

    def __init__( self: Optional[int] , UpperCamelCase__: Union[str, Any]=None , UpperCamelCase__: int=None , UpperCamelCase__: Tuple=None , UpperCamelCase__: Any="<|endoftext|>" , UpperCamelCase__: Any="<|endoftext|>" , UpperCamelCase__: Union[str, Any]="<|endoftext|>" , UpperCamelCase__: Tuple=False , **UpperCamelCase__: str , ):
        """Build the fast tokenizer.

        The positional parameters are (pre-mangling) vocab_file, merges_file and
        tokenizer_file; the three ``<|endoftext|>`` defaults are the unk/bos/eos
        tokens and the trailing flag is ``add_prefix_space``.
        """
        super().__init__(
            UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , unk_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , )
        # Rebuild the backend pre-tokenizer when its serialized
        # ``add_prefix_space`` setting disagrees with the requested one.
        lowerCamelCase__ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" , UpperCamelCase__ ) != add_prefix_space:
            lowerCamelCase__ : Any = getattr(UpperCamelCase__ , pre_tok_state.pop("""type""" ) )
            lowerCamelCase__ : Dict = add_prefix_space
            lowerCamelCase__ : Optional[int] = pre_tok_class(**UpperCamelCase__ )
        lowerCamelCase__ : Dict = add_prefix_space

    def lowerCamelCase_ ( self: int , UpperCamelCase__: str , UpperCamelCase__: Optional[str] = None ):
        # Persist the vocabulary files (original name: ``save_vocabulary``);
        # returns the tuple of written file paths.
        lowerCamelCase__ : Optional[Any] = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
        return tuple(UpperCamelCase__ )

    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: "Conversation" ):
        # Encode a chat history (original name: ``_build_conversation_input_ids``):
        # each turn is encoded without special tokens and terminated with EOS,
        # then the whole sequence is left-truncated to ``model_max_length``.
        lowerCamelCase__ : str = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) + [self.eos_token_id] )
        if len(UpperCamelCase__ ) > self.model_max_length:
            lowerCamelCase__ : int = input_ids[-self.model_max_length :]
        return input_ids
631
1
# NOTE(review): unit-test module for the PyTorch benchmarking utilities
# (``PyTorchBenchmark`` / ``PyTorchBenchmarkArguments``): it runs tiny models
# (sshleifer/tiny-gpt2, sgugger/tiny-distilbert-classification, sshleifer/tinier_bart)
# through inference/training benchmarks in various configurations and asserts the
# result dicts are populated.  The original physical line structure was collapsed
# into a few very long lines, preserved verbatim below; identifiers are
# machine-mangled (``lowerCamelCase__`` assignments read back via pre-mangling
# names such as ``results``, ``benchmark``), so the module is not runnable as
# written — confirm behaviour against the upstream file.
# --- imports; ``check_results_dict_not_empty`` helper (asserts every
# batch-size/sequence-length cell exists); plain inference benchmark; inference
# with ``only_pretrain_model``; start of the torchscript inference test ---
'''simple docstring''' import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class _lowercase ( unittest.TestCase ): def lowerCamelCase_ ( self: Dict , UpperCamelCase__: Dict ): for model_result in results.values(): for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ): lowerCamelCase__ : str = model_result["""result"""][batch_size][sequence_length] self.assertIsNotNone(UpperCamelCase__ ) def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : int = """sshleifer/tiny-gpt2""" lowerCamelCase__ : Union[str, Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , ) lowerCamelCase__ : List[Any] = PyTorchBenchmark(UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowerCamelCase_ ( self: List[str] ): lowerCamelCase__ : Union[str, Any] = """sgugger/tiny-distilbert-classification""" lowerCamelCase__ : List[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , only_pretrain_model=UpperCamelCase__ , ) lowerCamelCase__ : Dict = PyTorchBenchmark(UpperCamelCase__ ) lowerCamelCase__ : str = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ : List[Any] = """sshleifer/tiny-gpt2""" lowerCamelCase__ : Optional[int] = PyTorchBenchmarkArguments(
# --- torchscript inference; fp16 inference (skipped on CPU); inference with a
# config whose ``architectures`` is ``None``; start of the training benchmark ---
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , torchscript=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , ) lowerCamelCase__ : str = PyTorchBenchmark(UpperCamelCase__ ) lowerCamelCase__ : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" ) def lowerCamelCase_ ( self: Optional[Any] ): lowerCamelCase__ : Dict = """sshleifer/tiny-gpt2""" lowerCamelCase__ : Dict = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , fpaa=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , ) lowerCamelCase__ : List[Any] = PyTorchBenchmark(UpperCamelCase__ ) lowerCamelCase__ : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowerCamelCase_ ( self: List[str] ): lowerCamelCase__ : Optional[int] = """sshleifer/tiny-gpt2""" lowerCamelCase__ : Optional[Any] = AutoConfig.from_pretrained(UpperCamelCase__ ) # set architectures equal to `None` lowerCamelCase__ : List[Any] = None lowerCamelCase__ : Any = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , ) lowerCamelCase__ : Optional[Any] = PyTorchBenchmark(UpperCamelCase__ , configs=[config] ) lowerCamelCase__ : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : List[Any] = """sshleifer/tiny-gpt2""" lowerCamelCase__ : Dict = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCamelCase__
# --- training benchmark; fp16 training (skipped on CPU); inference with an
# explicit config object; start of the tinier_bart inference-with-config test ---
, inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , ) lowerCamelCase__ : Tuple = PyTorchBenchmark(UpperCamelCase__ ) lowerCamelCase__ : int = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" ) def lowerCamelCase_ ( self: Dict ): lowerCamelCase__ : Optional[Any] = """sshleifer/tiny-gpt2""" lowerCamelCase__ : Any = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=UpperCamelCase__ , multi_process=UpperCamelCase__ , ) lowerCamelCase__ : str = PyTorchBenchmark(UpperCamelCase__ ) lowerCamelCase__ : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def lowerCamelCase_ ( self: Dict ): lowerCamelCase__ : int = """sshleifer/tiny-gpt2""" lowerCamelCase__ : List[Any] = AutoConfig.from_pretrained(UpperCamelCase__ ) lowerCamelCase__ : Optional[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , ) lowerCamelCase__ : List[Any] = PyTorchBenchmark(UpperCamelCase__ , configs=[config] ) lowerCamelCase__ : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowerCamelCase_ ( self: Dict ): lowerCamelCase__ : Tuple = """sshleifer/tinier_bart""" lowerCamelCase__ : Optional[Any] = AutoConfig.from_pretrained(UpperCamelCase__ ) lowerCamelCase__ : List[str] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__
# --- tinier_bart inference end; training-with-config tests for tiny-gpt2 and
# tinier_bart; start of the save-to-csv test (benchmark output files written to a
# temporary directory) ---
, ) lowerCamelCase__ : List[Any] = PyTorchBenchmark(UpperCamelCase__ , configs=[config] ) lowerCamelCase__ : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowerCamelCase_ ( self: Optional[int] ): lowerCamelCase__ : Any = """sshleifer/tiny-gpt2""" lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(UpperCamelCase__ ) lowerCamelCase__ : Dict = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , ) lowerCamelCase__ : Tuple = PyTorchBenchmark(UpperCamelCase__ , configs=[config] ) lowerCamelCase__ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : str = """sshleifer/tinier_bart""" lowerCamelCase__ : Optional[int] = AutoConfig.from_pretrained(UpperCamelCase__ ) lowerCamelCase__ : Optional[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , ) lowerCamelCase__ : List[Any] = PyTorchBenchmark(UpperCamelCase__ , configs=[config] ) lowerCamelCase__ : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def lowerCamelCase_ ( self: Optional[int] ): lowerCamelCase__ : str = """sshleifer/tiny-gpt2""" with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase__ : str = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , save_to_csv=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(UpperCamelCase__ , """inf_time.csv""" ) ,
# --- csv-file existence assertions; memory line-by-line tracing test: asserts
# the inference/train summaries expose sequential/cumulative/current/total and
# that the log file is written ---
train_memory_csv_file=os.path.join(UpperCamelCase__ , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(UpperCamelCase__ , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(UpperCamelCase__ , """train_time.csv""" ) , env_info_csv_file=os.path.join(UpperCamelCase__ , """env.csv""" ) , multi_process=UpperCamelCase__ , ) lowerCamelCase__ : int = PyTorchBenchmark(UpperCamelCase__ ) benchmark.run() self.assertTrue(Path(os.path.join(UpperCamelCase__ , """inf_time.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(UpperCamelCase__ , """train_time.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(UpperCamelCase__ , """inf_mem.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(UpperCamelCase__ , """train_mem.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(UpperCamelCase__ , """env.csv""" ) ).exists() ) def lowerCamelCase_ ( self: str ): lowerCamelCase__ : Dict = """sshleifer/tiny-gpt2""" def _check_summary_is_not_empty(UpperCamelCase__: str ): self.assertTrue(hasattr(UpperCamelCase__ , """sequential""" ) ) self.assertTrue(hasattr(UpperCamelCase__ , """cumulative""" ) ) self.assertTrue(hasattr(UpperCamelCase__ , """current""" ) ) self.assertTrue(hasattr(UpperCamelCase__ , """total""" ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase__ : Dict = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(UpperCamelCase__ , """log.txt""" ) , log_print=UpperCamelCase__ , trace_memory_line_by_line=UpperCamelCase__ , multi_process=UpperCamelCase__ , ) lowerCamelCase__ : Union[str, Any] = PyTorchBenchmark(UpperCamelCase__ ) lowerCamelCase__ : Optional[Any] = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(UpperCamelCase__ , """log.txt""" ) ).exists() )
631
'''simple docstring'''
# Lazy import scaffolding for the GIT (GenerativeImage2Text) model package:
# submodules are only imported when one of their names is first accessed, via
# ``_LazyModule``.  NOTE(review): identifiers are machine-mangled — every
# module-level binding was renamed to ``_A``, yet ``_LazyModule`` still reads
# ``_import_structure`` (the pre-mangling name of the first dict), so the module
# is not runnable as written.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Maps submodule name -> public names it provides; consumed by ``_LazyModule``.
_A : Dict = {
    '''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''],
    '''processing_git''': ['''GitProcessor'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: the torch-only modeling classes are simply not registered.
    pass
else:
    _A : Union[str, Any] = [
        '''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''GitForCausalLM''',
        '''GitModel''',
        '''GitPreTrainedModel''',
        '''GitVisionModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers see ordinary eager imports.
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )
else:
    import sys

    # At run time, replace this module in ``sys.modules`` with a lazy proxy.
    _A : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
631
1
"""Wav2Vec2 model configuration."""
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}


class Wav2Vec2Config(PretrainedConfig):
    """Configuration for a Wav2Vec2 model.

    Defaults reproduce the architecture of ``facebook/wav2vec2-base-960h``.
    Parameter names were restored from the body's attribute assignments; the
    mangled original declared every parameter with the same name, which is a
    SyntaxError in Python.
    """

    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1_500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        # Store conv specs as lists so they serialize cleanly to JSON.
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # The three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        # Total downsampling factor of the feature encoder (product of conv strides).
        return functools.reduce(operator.mul, self.conv_stride, 1)
631
'''simple docstring''' import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin _A : int =get_tests_dir('''fixtures/test_sentencepiece.model''') _A : Tuple ={'''target_lang''': '''fi''', '''source_lang''': '''en'''} _A : int ='''>>zh<<''' _A : Dict ='''Helsinki-NLP/''' if is_torch_available(): _A : List[Any] ='''pt''' elif is_tf_available(): _A : Optional[int] ='''tf''' else: _A : Dict ='''jax''' @require_sentencepiece class _lowercase ( _lowercase , unittest.TestCase ): a = MarianTokenizer a = False a = True def lowerCamelCase_ ( self: List[str] ): super().setUp() lowerCamelCase__ : List[Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""] lowerCamelCase__ : Optional[Any] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) ) lowerCamelCase__ : Optional[int] = Path(self.tmpdirname ) save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] ) save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] ) copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] ) lowerCamelCase__ : Dict = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase_ ( self: Optional[Any] , **UpperCamelCase__: Any ): return MarianTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def lowerCamelCase_ ( self: str , 
UpperCamelCase__: List[str] ): return ( "This is a test", "This is a test", ) def lowerCamelCase_ ( self: Optional[Any] ): lowerCamelCase__ : Any = """</s>""" lowerCamelCase__ : List[Any] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ ) def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : int = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """</s>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """<pad>""" ) self.assertEqual(len(UpperCamelCase__ ) , 9 ) def lowerCamelCase_ ( self: int ): self.assertEqual(self.get_tokenizer().vocab_size , 9 ) def lowerCamelCase_ ( self: int ): lowerCamelCase__ : List[Any] = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' ) lowerCamelCase__ : Optional[int] = en_de_tokenizer(["""I am a small frog"""] , return_tensors=UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : Optional[Any] = [38, 121, 14, 697, 38_848, 0] self.assertListEqual(UpperCamelCase__ , batch.input_ids[0] ) lowerCamelCase__ : List[str] = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(UpperCamelCase__ ) lowerCamelCase__ : Tuple = [x.name for x in Path(UpperCamelCase__ ).glob("""*""" )] self.assertIn("""source.spm""" , UpperCamelCase__ ) MarianTokenizer.from_pretrained(UpperCamelCase__ ) def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ : List[Any] = self.get_tokenizer() lowerCamelCase__ : Any = tok( ["""I am a small frog""" * 1_000, """I am a small frog"""] , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(batch.input_ids.shape , (2, 512) ) def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : str = self.get_tokenizer() lowerCamelCase__ : 
Dict = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(batch_smaller.input_ids.shape , (2, 10) ) @slow def lowerCamelCase_ ( self: List[str] ): # fmt: off lowerCamelCase__ : int = {"""input_ids""": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 
58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , ) def lowerCamelCase_ ( self: List[str] ): lowerCamelCase__ : Union[str, Any] = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" ) lowerCamelCase__ : str = """Tämä on testi""" lowerCamelCase__ : Any = """This is a test""" lowerCamelCase__ : int = [76, 7, 2_047, 2] lowerCamelCase__ : List[str] = [69, 12, 11, 940, 2] 
lowerCamelCase__ : Tuple = tokenizer(UpperCamelCase__ ).input_ids self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = tokenizer(text_target=UpperCamelCase__ ).input_ids self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : Tuple = tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
631
1
"""Speech processor class for SpeechT5: one API over a feature extractor and a tokenizer."""
from ...processing_utils import ProcessorMixin


class SpeechT5Processor(ProcessorMixin):
    """Wraps a SpeechT5FeatureExtractor (audio) and a SpeechT5Tokenizer (text).

    The mangled original named both class attributes ``a`` (the second
    silently overwrote the first) and all four methods ``lowerCamelCase_``
    (later defs clobbered earlier ones); canonical names are restored here.
    """

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        """Process model inputs and/or targets.

        Exactly one of ``audio``/``text`` may be given as the model input, and
        exactly one of ``audio_target``/``text_target`` as the label source.
        Returns the processed inputs with ``labels`` (and, when available,
        ``decoder_attention_mask``) attached; returns only the targets when no
        input was given.

        Raises:
            ValueError: on conflicting inputs or when nothing was provided.
        """
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        """Pad ``input_values``/``input_ids`` and ``labels`` to a uniform length.

        Mirrors :meth:`__call__`'s output layout: returns padded inputs with
        ``labels`` / ``decoder_attention_mask`` attached, or only the padded
        targets when no input was given.
        """
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                # Text labels: pad token ids with the tokenizer.
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # Spectrogram labels: temporarily report the feature size as the
                # number of mel bins so padding uses the right last dimension.
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)
631
"""RWKV model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class RwkvConfig(PretrainedConfig):
    """Configuration for an RWKV model.

    Parameter names restored from the body's attribute assignments (the mangled
    original declared every parameter with the same name — a SyntaxError).
    """

    model_type = "rwkv"
    # `max_position_embeddings` is exposed as an alias of `context_length` for
    # API compatibility with other Transformers models.
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50_277,
        context_length=1_024,
        hidden_size=4_096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Derive sensible size-dependent defaults when not given explicitly.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
631
1
'''simple docstring''' import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class _lowercase ( _lowercase , unittest.TestCase ): a = BarthezTokenizer a = BarthezTokenizerFast a = True a = True def lowerCamelCase_ ( self: Tuple ): super().setUp() lowerCamelCase__ : Dict = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=UpperCamelCase__ ) lowerCamelCase__ : Optional[Any] = tokenizer def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ : Optional[Any] = """<pad>""" lowerCamelCase__ : int = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ ) def lowerCamelCase_ ( self: Dict ): lowerCamelCase__ : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<s>""" ) self.assertEqual(vocab_keys[1] , """<pad>""" ) self.assertEqual(vocab_keys[-1] , """<mask>""" ) self.assertEqual(len(UpperCamelCase__ ) , 101_122 ) def lowerCamelCase_ ( self: int ): self.assertEqual(self.get_tokenizer().vocab_size , 101_122 ) @require_torch def lowerCamelCase_ ( self: Optional[Any] ): lowerCamelCase__ : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] lowerCamelCase__ : str = [0, 57, 3_018, 70_307, 91, 2] lowerCamelCase__ : Optional[Any] = self.tokenizer( UpperCamelCase__ , max_length=len(UpperCamelCase__ ) , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors="""pt""" ) self.assertIsInstance(UpperCamelCase__ , 
UpperCamelCase__ ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , batch.attention_mask.shape ) lowerCamelCase__ : List[Any] = batch.input_ids.tolist()[0] self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) def lowerCamelCase_ ( self: str ): if not self.test_rust_tokenizer: return lowerCamelCase__ : List[str] = self.get_tokenizer() lowerCamelCase__ : Optional[int] = self.get_rust_tokenizer() lowerCamelCase__ : List[Any] = """I was born in 92000, and this is falsé.""" lowerCamelCase__ : Union[str, Any] = tokenizer.tokenize(UpperCamelCase__ ) lowerCamelCase__ : Dict = rust_tokenizer.tokenize(UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : Tuple = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) lowerCamelCase__ : Dict = rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : str = self.get_rust_tokenizer() lowerCamelCase__ : Tuple = tokenizer.encode(UpperCamelCase__ ) lowerCamelCase__ : List[Any] = rust_tokenizer.encode(UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) @slow def lowerCamelCase_ ( self: Optional[int] ): # fmt: off lowerCamelCase__ : List[str] = {"""input_ids""": [[0, 490, 14_328, 4_507, 354, 47, 43_669, 95, 25, 78_117, 20_215, 19_779, 190, 22, 400, 4, 35_343, 80_310, 603, 86, 24_937, 105, 33_438, 94_762, 196, 39_642, 7, 15, 15_933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10_534, 87, 25, 66, 3_358, 196, 55_289, 8, 82_961, 81, 2_204, 75_203, 7, 15, 763, 12_956, 216, 178, 14_328, 9_595, 1_377, 69_693, 7, 448, 71_021, 196, 18_106, 1_437, 13_974, 108, 9_083, 4, 49_315, 7, 39, 86, 1_326, 2_793, 46_333, 4, 448, 196, 74_588, 7, 49_315, 7, 39, 21, 822, 38_470, 74, 21, 66_723, 62_480, 8, 22_050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. lowerCamelCase__ : str = [ """Le transformeur est un modèle d'apprentissage profond introduit en 2017, """ """utilisé principalement dans le domaine du traitement automatique des langues (TAL).""", """À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """ """pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """ """telles que la traduction et la synthèse de texte.""", ] self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase__ , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=UpperCamelCase__ , )
631
"""RoCBert model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    """Configuration for a RoCBert model (BERT plus pronunciation/shape embeddings).

    Parameter names restored from the body's attribute assignments (the mangled
    original declared every parameter with the same name — a SyntaxError).
    """

    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        # RoCBert-specific: optional pronunciation/shape channels and how they
        # are combined with the token embeddings.
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
631
1
"""Tests for the IPNDM (Improved Pseudo Numerical Methods for Diffusion Models) scheduler.

Reconstruction note: the original block's identifiers were mangled (every
assignment targeted ``lowerCamelCase__`` and every method was named
``lowerCamelCase_``, so later reads of ``config``/``scheduler``/... raised
NameError and the method definitions shadowed one another).  This rewrite
restores the intended names; runtime behavior of each test is unchanged.
"""
import tempfile

import torch

from diffusers import IPNDMScheduler

from .test_schedulers import SchedulerCommonTest


class IPNDMSchedulerTest(SchedulerCommonTest):
    # Read by SchedulerCommonTest helpers (dummy_sample, etc.).
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        """Return a minimal scheduler config, overridable via ``kwargs``."""
        config = {"num_train_timesteps": 1_000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """Save/load round-trip check: a reloaded scheduler must step identically."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (IPNDM stores them in ``ets``)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            # A second step exercises the multi-step history path as well.
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # Covered by check_over_configs / check_over_forward above.
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """Same round-trip check as ``check_over_configs`` but with a fixed config."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        """Run two full denoising passes and return the final sample."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        """``step`` must preserve the sample's shape at different timesteps."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        # Regression value for the deterministic dummy model.
        assert abs(result_mean.item() - 2_540_529) < 10
631
"""Draw the Sierpinski triangle fractal with turtle graphics.

Usage: ``python fractals.py <int:depth_for_fractal>``
"""
import sys


def get_mid(pa, pb):
    """Return the midpoint of the segment joining 2-D points ``pa`` and ``pb``.

    Fix: the original declared both parameters with the same name (a
    SyntaxError in Python) and therefore averaged one point with itself.
    """
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2


def triangle(vertexa, vertexb, vertexc, depth) -> None:
    """Recursively draw Sierpinski sub-triangles down to ``depth`` levels.

    Relies on the module-global pen ``my_pen`` created in the ``__main__``
    guard below.
    """
    # Outline the current triangle.
    my_pen.up()
    my_pen.goto(vertexa[0], vertexa[1])
    my_pen.down()
    my_pen.goto(vertexb[0], vertexb[1])
    my_pen.goto(vertexc[0], vertexc[1])
    my_pen.goto(vertexa[0], vertexa[1])

    if depth == 0:
        return

    # Recurse into the three corner sub-triangles.
    triangle(vertexa, get_mid(vertexa, vertexb), get_mid(vertexa, vertexc), depth - 1)
    triangle(vertexb, get_mid(vertexa, vertexb), get_mid(vertexb, vertexc), depth - 1)
    triangle(vertexc, get_mid(vertexc, vertexb), get_mid(vertexa, vertexc), depth - 1)


if __name__ == "__main__":
    # Imported lazily so merely importing this module (e.g. for get_mid)
    # does not require a GUI/tkinter-capable environment.
    import turtle

    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")

    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
631
1
"""Sum of Euler's totient function phi(n) for 2 <= n <= limit."""


def solution(limit: int = 1_000_000) -> int:
    """Return ``sum(phi(n) for n in range(2, limit + 1))``.

    This equals the number of reduced proper fractions with denominator at
    most ``limit`` (Project Euler style).

    Fix: the original accumulated ``phi[n] *= 1 - 1/p`` in floating point
    and truncated with ``int()``, which risks off-by-one rounding errors at
    large limits.  This version uses the exact integer totient sieve:
    for each prime ``p`` dividing ``m``, ``phi[m] -= phi[m] // p`` applies
    the factor ``(1 - 1/p)`` with no rounding.
    """
    # phi[n] starts as n; primes are detected as the entries still equal to
    # their index when reached (they have not been reduced by any smaller prime).
    phi = list(range(limit + 1))
    for p in range(2, limit + 1):
        if phi[p] == p:  # p is prime
            for multiple in range(p, limit + 1, p):
                phi[multiple] -= phi[multiple] // p
    return sum(phi[2:])


if __name__ == "__main__":
    print(f"{solution() = }")
631
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class _lowercase : def __init__( self: int , UpperCamelCase__: Dict , UpperCamelCase__: List[str]=13 , UpperCamelCase__: Union[str, Any]=7 , UpperCamelCase__: Union[str, Any]=True , UpperCamelCase__: List[Any]=True , UpperCamelCase__: Optional[Any]=True , UpperCamelCase__: int=True , UpperCamelCase__: List[Any]=99 , UpperCamelCase__: Tuple=32 , UpperCamelCase__: List[str]=2 , UpperCamelCase__: Optional[Any]=4 , UpperCamelCase__: Optional[int]=37 , UpperCamelCase__: Any="gelu" , UpperCamelCase__: Any=0.1 , UpperCamelCase__: int=0.1 , UpperCamelCase__: Optional[Any]=512 , UpperCamelCase__: List[str]=16 , UpperCamelCase__: Optional[int]=2 , UpperCamelCase__: Dict=0.02 , UpperCamelCase__: Tuple=3 , UpperCamelCase__: Optional[int]=4 , UpperCamelCase__: Union[str, Any]=None , ): lowerCamelCase__ : Dict = parent lowerCamelCase__ : Union[str, Any] = 13 lowerCamelCase__ : Any = 7 lowerCamelCase__ : int = True lowerCamelCase__ : Optional[Any] = True lowerCamelCase__ : Dict = True lowerCamelCase__ : List[str] = True lowerCamelCase__ : str = 99 lowerCamelCase__ : Dict = 384 lowerCamelCase__ : Optional[Any] = 2 lowerCamelCase__ : Optional[int] = 4 lowerCamelCase__ : Optional[Any] = 37 lowerCamelCase__ : Union[str, Any] = """gelu""" lowerCamelCase__ : int = 0.1 lowerCamelCase__ : Optional[Any] = 0.1 lowerCamelCase__ : List[Any] = 
512 lowerCamelCase__ : Optional[Any] = 16 lowerCamelCase__ : Any = 2 lowerCamelCase__ : Optional[Any] = 0.02 lowerCamelCase__ : int = 3 lowerCamelCase__ : List[str] = 4 lowerCamelCase__ : Any = 128 lowerCamelCase__ : List[Any] = 2 lowerCamelCase__ : Optional[Any] = 9 lowerCamelCase__ : Any = 1 lowerCamelCase__ : Optional[int] = None def lowerCamelCase_ ( self: List[Any] ): lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ : str = None if self.use_input_mask: lowerCamelCase__ : int = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase__ : List[str] = None if self.use_token_type_ids: lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase__ : int = None lowerCamelCase__ : Optional[Any] = None lowerCamelCase__ : Optional[Any] = None if self.use_labels: lowerCamelCase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase__ : Any = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ : List[Any] = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCamelCase__ , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: Dict , UpperCamelCase__: List[str] , UpperCamelCase__: Any , UpperCamelCase__: Any , UpperCamelCase__: Any , 
UpperCamelCase__: str , UpperCamelCase__: Any ): lowerCamelCase__ : List[Any] = TFConvBertModel(config=UpperCamelCase__ ) lowerCamelCase__ : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} lowerCamelCase__ : List[str] = [input_ids, input_mask] lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ ) lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase_ ( self: Any , UpperCamelCase__: Tuple , UpperCamelCase__: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple ): lowerCamelCase__ : int = TFConvBertForMaskedLM(config=UpperCamelCase__ ) lowerCamelCase__ : Tuple = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } lowerCamelCase__ : int = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase_ ( self: str , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Any , UpperCamelCase__: Any , UpperCamelCase__: Union[str, Any] ): lowerCamelCase__ : int = self.num_labels lowerCamelCase__ : Dict = TFConvBertForSequenceClassification(config=UpperCamelCase__ ) lowerCamelCase__ : Dict = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[str] , UpperCamelCase__: int , UpperCamelCase__: List[str] , 
UpperCamelCase__: Dict ): lowerCamelCase__ : Optional[int] = self.num_choices lowerCamelCase__ : Dict = TFConvBertForMultipleChoice(config=UpperCamelCase__ ) lowerCamelCase__ : int = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) ) lowerCamelCase__ : List[str] = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) ) lowerCamelCase__ : Any = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) ) lowerCamelCase__ : Tuple = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase_ ( self: str , UpperCamelCase__: Any , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: Any , UpperCamelCase__: Optional[int] , UpperCamelCase__: str , UpperCamelCase__: int ): lowerCamelCase__ : List[Any] = self.num_labels lowerCamelCase__ : List[str] = TFConvBertForTokenClassification(config=UpperCamelCase__ ) lowerCamelCase__ : Optional[Any] = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } lowerCamelCase__ : Tuple = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase_ ( self: Any , UpperCamelCase__: List[Any] , UpperCamelCase__: str , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any] ): lowerCamelCase__ : Optional[int] = TFConvBertForQuestionAnswering(config=UpperCamelCase__ ) lowerCamelCase__ : int = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ ) 
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase_ ( self: Optional[Any] ): lowerCamelCase__ : str = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) : str = config_and_inputs lowerCamelCase__ : Optional[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class _lowercase ( _lowercase , _lowercase , unittest.TestCase ): a = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) a = ( { """feature-extraction""": TFConvBertModel, """fill-mask""": TFConvBertForMaskedLM, """question-answering""": TFConvBertForQuestionAnswering, """text-classification""": TFConvBertForSequenceClassification, """token-classification""": TFConvBertForTokenClassification, """zero-shot""": TFConvBertForSequenceClassification, } if is_tf_available() else {} ) a = False a = False a = False def lowerCamelCase_ ( self: str ): lowerCamelCase__ : Dict = TFConvBertModelTester(self ) lowerCamelCase__ : Dict = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 ) def lowerCamelCase_ ( self: List[str] ): self.config_tester.run_common_tests() def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ : 
int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ ) def lowerCamelCase_ ( self: str ): lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ ) @slow def lowerCamelCase_ ( self: Optional[Any] ): lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ : Dict = True lowerCamelCase__ : Tuple = True if hasattr(UpperCamelCase__ , """use_cache""" ): lowerCamelCase__ : Union[str, Any] = True lowerCamelCase__ : List[str] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length ) lowerCamelCase__ : Tuple = getattr(self.model_tester , """key_length""" , UpperCamelCase__ ) for model_class in self.all_model_classes: lowerCamelCase__ : int = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ ) lowerCamelCase__ : Dict = len(model(UpperCamelCase__ ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase__ , saved_model=UpperCamelCase__ ) lowerCamelCase__ : int = os.path.join(UpperCamelCase__ , """saved_model""" , """1""" ) lowerCamelCase__ : List[Any] = tf.keras.models.load_model(UpperCamelCase__ ) lowerCamelCase__ : Any = model(UpperCamelCase__ ) if self.is_encoder_decoder: lowerCamelCase__ : Dict = outputs["""encoder_hidden_states"""] lowerCamelCase__ : Any = outputs["""encoder_attentions"""] else: lowerCamelCase__ : int = 
outputs["""hidden_states"""] lowerCamelCase__ : Optional[int] = outputs["""attentions"""] self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = getattr( self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def lowerCamelCase_ ( self: int ): lowerCamelCase__ : Union[str, Any] = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" ) self.assertIsNotNone(UpperCamelCase__ ) def lowerCamelCase_ ( self: List[str] ): lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ : Union[str, Any] = True lowerCamelCase__ : int = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length ) lowerCamelCase__ : Any = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length ) lowerCamelCase__ : Optional[int] = getattr(self.model_tester , """key_length""" , UpperCamelCase__ ) lowerCamelCase__ : List[Any] = getattr(self.model_tester , """key_length""" , UpperCamelCase__ ) def check_decoder_attentions_output(UpperCamelCase__: Union[str, Any] ): lowerCamelCase__ : List[Any] = len(UpperCamelCase__ ) self.assertEqual(out_len % 2 , 0 ) lowerCamelCase__ : Any = outputs.decoder_attentions self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def 
check_encoder_attentions_output(UpperCamelCase__: List[str] ): lowerCamelCase__ : Any = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: lowerCamelCase__ : int = True lowerCamelCase__ : Any = False lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ ) lowerCamelCase__ : List[str] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) lowerCamelCase__ : Optional[int] = len(UpperCamelCase__ ) self.assertEqual(config.output_hidden_states , UpperCamelCase__ ) check_encoder_attentions_output(UpperCamelCase__ ) if self.is_encoder_decoder: lowerCamelCase__ : str = model_class(UpperCamelCase__ ) lowerCamelCase__ : Tuple = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) self.assertEqual(config.output_hidden_states , UpperCamelCase__ ) check_decoder_attentions_output(UpperCamelCase__ ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] lowerCamelCase__ : Optional[int] = True lowerCamelCase__ : Dict = model_class(UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) self.assertEqual(config.output_hidden_states , UpperCamelCase__ ) check_encoder_attentions_output(UpperCamelCase__ ) # Check attention is always last and order is fine lowerCamelCase__ : List[Any] = True lowerCamelCase__ : int = True lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ ) lowerCamelCase__ : List[Any] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCamelCase__ ) ) 
self.assertEqual(model.config.output_hidden_states , UpperCamelCase__ ) check_encoder_attentions_output(UpperCamelCase__ ) @require_tf class _lowercase ( unittest.TestCase ): @slow def lowerCamelCase_ ( self: List[str] ): lowerCamelCase__ : Dict = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" ) lowerCamelCase__ : Optional[Any] = tf.constant([[0, 1, 2, 3, 4, 5]] ) lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ )[0] lowerCamelCase__ : Dict = [1, 6, 768] self.assertEqual(output.shape , UpperCamelCase__ ) lowerCamelCase__ : Dict = tf.constant( [ [ [-0.03_475_493, -0.4_686_034, -0.30_638_832], [0.22_637_248, -0.26_988_646, -0.7_423_424], [0.10_324_868, -0.45_013_508, -0.58_280_784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , UpperCamelCase__ , atol=1e-4 )
631
1
'''simple docstring''' import contextlib import csv import json import os import sqlitea import tarfile import textwrap import zipfile import pyarrow as pa import pyarrow.parquet as pq import pytest import datasets import datasets.config @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ () -> Dict: lowerCamelCase__ : int = 10 lowerCamelCase__ : Optional[Any] = datasets.Features( { """tokens""": datasets.Sequence(datasets.Value("""string""" ) ), """labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ), """answers""": datasets.Sequence( { """text""": datasets.Value("""string""" ), """answer_start""": datasets.Value("""int32""" ), } ), """id""": datasets.Value("""int64""" ), } ) lowerCamelCase__ : List[Any] = datasets.Dataset.from_dict( { """tokens""": [["""foo"""] * 5] * n, """labels""": [[1] * 5] * n, """answers""": [{"""answer_start""": [97], """text""": ["""1976"""]}] * 10, """id""": list(range(UpperCamelCase ) ), } , features=UpperCamelCase , ) return dataset @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Tuple: lowerCamelCase__ : List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" ) dataset.map(cache_file_name=UpperCamelCase ) return filename # FILE_CONTENT + files _A : Optional[Any] ='''\ Text data. 
Second line of data.''' @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> List[Any]: lowerCamelCase__ : List[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt""" lowerCamelCase__ : Tuple = FILE_CONTENT with open(UpperCamelCase , """w""" ) as f: f.write(UpperCamelCase ) return filename @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Tuple: import bza lowerCamelCase__ : List[str] = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2""" lowerCamelCase__ : Any = bytes(UpperCamelCase , """utf-8""" ) with bza.open(UpperCamelCase , """wb""" ) as f: f.write(UpperCamelCase ) return path @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Optional[int]: import gzip lowerCamelCase__ : List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" ) lowerCamelCase__ : Tuple = bytes(UpperCamelCase , """utf-8""" ) with gzip.open(UpperCamelCase , """wb""" ) as f: f.write(UpperCamelCase ) return path @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Tuple: if datasets.config.LZ4_AVAILABLE: import lza.frame lowerCamelCase__ : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4""" lowerCamelCase__ : Any = bytes(UpperCamelCase , """utf-8""" ) with lza.frame.open(UpperCamelCase , """wb""" ) as f: f.write(UpperCamelCase ) return path @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> List[str]: if datasets.config.PY7ZR_AVAILABLE: import pyazr lowerCamelCase__ : Tuple = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z""" with pyazr.SevenZipFile(UpperCamelCase , """w""" ) as archive: archive.write(UpperCamelCase , arcname=os.path.basename(UpperCamelCase ) ) return path @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> List[Any]: import tarfile lowerCamelCase__ : List[Any] = 
tmp_path_factory.mktemp("""data""" ) / """file.txt.tar""" with tarfile.TarFile(UpperCamelCase , """w""" ) as f: f.add(UpperCamelCase , arcname=os.path.basename(UpperCamelCase ) ) return path @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> int: import lzma lowerCamelCase__ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz""" lowerCamelCase__ : Dict = bytes(UpperCamelCase , """utf-8""" ) with lzma.open(UpperCamelCase , """wb""" ) as f: f.write(UpperCamelCase ) return path @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> int: import zipfile lowerCamelCase__ : List[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip""" with zipfile.ZipFile(UpperCamelCase , """w""" ) as f: f.write(UpperCamelCase , arcname=os.path.basename(UpperCamelCase ) ) return path @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> List[str]: if datasets.config.ZSTANDARD_AVAILABLE: import zstandard as zstd lowerCamelCase__ : str = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst""" lowerCamelCase__ : str = bytes(UpperCamelCase , """utf-8""" ) with zstd.open(UpperCamelCase , """wb""" ) as f: f.write(UpperCamelCase ) return path @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Optional[int]: lowerCamelCase__ : str = tmp_path_factory.mktemp("""data""" ) / """file.xml""" lowerCamelCase__ : Union[str, Any] = textwrap.dedent( """\ <?xml version=\"1.0\" encoding=\"UTF-8\" ?> <tmx version=\"1.4\"> <header segtype=\"sentence\" srclang=\"ca\" /> <body> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 
4</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv> </tu> </body> </tmx>""" ) with open(UpperCamelCase , """w""" ) as f: f.write(UpperCamelCase ) return filename _A : Optional[int] =[ {'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0}, {'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0}, {'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0}, {'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0}, ] _A : int =[ {'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0}, {'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0}, ] _A : Any ={ '''col_1''': ['''0''', '''1''', '''2''', '''3'''], '''col_2''': [0, 1, 2, 3], '''col_3''': [0.0, 1.0, 2.0, 3.0], } _A : Dict =[ {'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0}, {'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1}, ] _A : Any =[ {'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0}, {'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0}, {'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0}, {'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0}, ] @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ () -> int: return DATA_DICT_OF_LISTS @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> str: lowerCamelCase__ : int = datasets.Dataset.from_dict(UpperCamelCase ) lowerCamelCase__ : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" ) dataset.map(cache_file_name=UpperCamelCase ) return path @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Union[str, Any]: lowerCamelCase__ : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" ) with contextlib.closing(sqlitea.connect(UpperCamelCase ) ) as con: lowerCamelCase__ : Tuple = con.cursor() cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" ) for item in DATA: cur.execute("""INSERT 
INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) ) con.commit() return path @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Any: lowerCamelCase__ : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" ) with open(UpperCamelCase , """w""" , newline="""""" ) as f: lowerCamelCase__ : List[Any] = csv.DictWriter(UpperCamelCase , fieldnames=["""col_1""", """col_2""", """col_3"""] ) writer.writeheader() for item in DATA: writer.writerow(UpperCamelCase ) return path @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Optional[Any]: lowerCamelCase__ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" ) with open(UpperCamelCase , """w""" , newline="""""" ) as f: lowerCamelCase__ : int = csv.DictWriter(UpperCamelCase , fieldnames=["""col_1""", """col_2""", """col_3"""] ) writer.writeheader() for item in DATA: writer.writerow(UpperCamelCase ) return path @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Optional[int]: import bza lowerCamelCase__ : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2""" with open(UpperCamelCase , """rb""" ) as f: lowerCamelCase__ : List[str] = f.read() # data = bytes(FILE_CONTENT, "utf-8") with bza.open(UpperCamelCase , """wb""" ) as f: f.write(UpperCamelCase ) return path @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]: lowerCamelCase__ : Any = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip""" with zipfile.ZipFile(UpperCamelCase , """w""" ) as f: f.write(UpperCamelCase , arcname=os.path.basename(UpperCamelCase ) ) f.write(UpperCamelCase , arcname=os.path.basename(UpperCamelCase ) ) return path @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Any: lowerCamelCase__ : str 
= tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip""" with zipfile.ZipFile(UpperCamelCase , """w""" ) as f: f.write(UpperCamelCase , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) ) f.write(UpperCamelCase , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) ) return path @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int: lowerCamelCase__ : Dict = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip""" with zipfile.ZipFile(UpperCamelCase , """w""" ) as f: f.write(UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(UpperCamelCase ) ) ) f.write(UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(UpperCamelCase ) ) ) return path @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Any: lowerCamelCase__ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" ) lowerCamelCase__ : Union[str, Any] = pa.schema( { """col_1""": pa.string(), """col_2""": pa.intaa(), """col_3""": pa.floataa(), } ) with open(UpperCamelCase , """wb""" ) as f: lowerCamelCase__ : Optional[Any] = pq.ParquetWriter(UpperCamelCase , schema=UpperCamelCase ) lowerCamelCase__ : Optional[Any] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(UpperCamelCase ) )] for k in DATA[0]} , schema=UpperCamelCase ) writer.write_table(UpperCamelCase ) writer.close() return path @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Dict: lowerCamelCase__ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" ) lowerCamelCase__ : List[str] = {"""data""": DATA} with open(UpperCamelCase , """w""" ) as f: json.dump(UpperCamelCase , UpperCamelCase ) return path @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> int: lowerCamelCase__ : Dict = str(tmp_path_factory.mktemp("""data""" ) / 
"""dataset.json""" ) lowerCamelCase__ : str = {"""data""": DATA_DICT_OF_LISTS} with open(UpperCamelCase , """w""" ) as f: json.dump(UpperCamelCase , UpperCamelCase ) return path @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Any: lowerCamelCase__ : Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" ) with open(UpperCamelCase , """w""" ) as f: for item in DATA: f.write(json.dumps(UpperCamelCase ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Union[str, Any]: lowerCamelCase__ : Any = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" ) with open(UpperCamelCase , """w""" ) as f: for item in DATA: f.write(json.dumps(UpperCamelCase ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> int: lowerCamelCase__ : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" ) with open(UpperCamelCase , """w""" ) as f: for item in DATA_312: f.write(json.dumps(UpperCamelCase ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Optional[Any]: lowerCamelCase__ : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" ) with open(UpperCamelCase , """w""" ) as f: for item in DATA_STR: f.write(json.dumps(UpperCamelCase ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> List[Any]: import gzip lowerCamelCase__ : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" ) with open(UpperCamelCase , """rb""" ) as orig_file: with gzip.open(UpperCamelCase , """wb""" ) as zipped_file: zipped_file.writelines(UpperCamelCase ) return path @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> int: import gzip lowerCamelCase__ : Any = str(tmp_path_factory.mktemp("""data""" 
) / """dataset.jsonl.gz""" ) with open(UpperCamelCase , """rb""" ) as orig_file: with gzip.open(UpperCamelCase , """wb""" ) as zipped_file: zipped_file.writelines(UpperCamelCase ) return path @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[int]: lowerCamelCase__ : Dict = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip""" with zipfile.ZipFile(UpperCamelCase , """w""" ) as f: f.write(UpperCamelCase , arcname=os.path.basename(UpperCamelCase ) ) f.write(UpperCamelCase , arcname=os.path.basename(UpperCamelCase ) ) return path @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Any: lowerCamelCase__ : int = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip""" with zipfile.ZipFile(UpperCamelCase , """w""" ) as f: f.write(UpperCamelCase , arcname=os.path.join("""nested""" , os.path.basename(UpperCamelCase ) ) ) return path @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Any: lowerCamelCase__ : int = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip""" with zipfile.ZipFile(UpperCamelCase , """w""" ) as f: f.write(UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(UpperCamelCase ) ) ) f.write(UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(UpperCamelCase ) ) ) return path @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[int]: lowerCamelCase__ : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar""" with tarfile.TarFile(UpperCamelCase , """w""" ) as f: f.add(UpperCamelCase , arcname=os.path.basename(UpperCamelCase ) ) f.add(UpperCamelCase , arcname=os.path.basename(UpperCamelCase ) ) return path @pytest.fixture(scope="""session""" ) def 
SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Any: lowerCamelCase__ : str = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar""" with tarfile.TarFile(UpperCamelCase , """w""" ) as f: f.add(UpperCamelCase , arcname=os.path.join("""nested""" , os.path.basename(UpperCamelCase ) ) ) return path @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Tuple: lowerCamelCase__ : Dict = ["""0""", """1""", """2""", """3"""] lowerCamelCase__ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" ) with open(UpperCamelCase , """w""" ) as f: for item in data: f.write(item + """\n""" ) return path @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> List[str]: lowerCamelCase__ : Optional[Any] = ["""0""", """1""", """2""", """3"""] lowerCamelCase__ : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" ) with open(UpperCamelCase , """w""" ) as f: for item in data: f.write(item + """\n""" ) return path @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> str: lowerCamelCase__ : List[str] = ["""0""", """1""", """2""", """3"""] lowerCamelCase__ : Dict = tmp_path_factory.mktemp("""data""" ) / """dataset.abc""" with open(UpperCamelCase , """w""" ) as f: for item in data: f.write(item + """\n""" ) return path @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]: lowerCamelCase__ : List[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip""" with zipfile.ZipFile(UpperCamelCase , """w""" ) as f: f.write(UpperCamelCase , arcname=os.path.basename(UpperCamelCase ) ) f.write(UpperCamelCase , arcname=os.path.basename(UpperCamelCase ) ) return path @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[Any]: lowerCamelCase__ : Dict = 
tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip""" with zipfile.ZipFile(UpperCamelCase , """w""" ) as f: f.write(UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(UpperCamelCase ) ) ) f.write(UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(UpperCamelCase ) ) ) return path @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int: lowerCamelCase__ : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip""" with zipfile.ZipFile(UpperCamelCase , """w""" ) as f: f.write(UpperCamelCase , arcname=os.path.basename("""unsupported.ext""" ) ) f.write(UpperCamelCase , arcname=os.path.basename("""unsupported_2.ext""" ) ) return path @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> List[str]: lowerCamelCase__ : int = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] ) lowerCamelCase__ : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" ) with open(UpperCamelCase , """w""" , encoding="""utf-8""" ) as f: f.write(UpperCamelCase ) return path @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ () -> Dict: return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" ) @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ () -> List[str]: return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" ) @pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Union[str, Any]: lowerCamelCase__ : List[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip""" with zipfile.ZipFile(UpperCamelCase , """w""" ) as f: f.write(UpperCamelCase , arcname=os.path.basename(UpperCamelCase ) ) f.write(UpperCamelCase , arcname=os.path.basename(UpperCamelCase ).replace(""".jpg""" , """2.jpg""" ) ) return path 
@pytest.fixture(scope="""session""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Any: lowerCamelCase__ : Optional[Any] = tmp_path_factory.mktemp("""data_dir""" ) (data_dir / "subdir").mkdir() with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f: f.write("""foo\n""" * 10 ) with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f: f.write("""bar\n""" * 10 ) # hidden file with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f: f.write("""bar\n""" * 10 ) # hidden directory (data_dir / ".subdir").mkdir() with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f: f.write("""foo\n""" * 10 ) with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f: f.write("""bar\n""" * 10 ) return data_dir
631
'''simple docstring''' _A : List[str] ='''0.21.0''' from .accelerator import Accelerator from .big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from .data_loader import skip_first_batches from .launchers import debug_launcher, notebook_launcher from .state import PartialState from .utils import ( DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states, ) if is_rich_available(): from .utils import rich
631
1
"""JSON/JSON-Lines dataset builder for the `datasets` library.

Reads newline-delimited JSON (or a single JSON object via ``field``) into
pyarrow Tables. The mangled original referenced undefined names
(``JsonConfig``, ``logger``, ``dl_manager``, ...) and defined two classes with
the same name; identifiers are restored here so the module is functional.
"""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional

import pyarrow as pa
import pyarrow.json as paj

import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline


logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON.

    Attributes:
        features: optional schema to cast the parsed tables to.
        encoding: text encoding of the input files.
        encoding_errors: error handling for decoding (defaults to "strict").
        field: if set, the file is one JSON object and only this top-level
            field is loaded.
        use_threads: deprecated, ignored.
        block_size: deprecated alias of ``chunksize``.
        chunksize: number of bytes read per batch in JSON-Lines mode.
        newlines_in_values: no longer supported; raises if set.
    """

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        """Validate/migrate deprecated config options and return dataset info."""
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Build split generators; data_files may be a str, list/tuple, or dict of splits."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        """Cast the parsed table to the configured features, adding missing columns as nulls."""
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield (key, pa.Table) pairs parsed from the input files.

        Two modes: ``field`` set -> load the whole file with ``json`` and keep
        one field; otherwise stream the file in chunks and parse JSON-Lines
        with pyarrow, growing the block size on "straddling object" errors.
        """
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)
            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line so a JSON object is never split across batches
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            # Fall back to parsing the whole file as one JSON document
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
631
"""Configuration class for TrOCR (decoder-only) models.

The mangled original declared every ``__init__`` parameter with the same name
``UpperCamelCase__`` (a SyntaxError: duplicate argument) and assigned from
undefined names; the signature is restored here from the visible defaults.
"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    """Stores the configuration of a TrOCR decoder.

    Defaults match microsoft/trocr-base: 50265-token vocab, 1024-dim model,
    12 decoder layers with 16 attention heads.
    """

    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map the generic PretrainedConfig attribute names onto TrOCR's own.
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50_265,
        d_model=1_024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4_096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding  # if True, embeddings are scaled by sqrt(d_model)
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        # Special-token ids are handled by the PretrainedConfig base class.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
631
1
'''simple docstring''' import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class _lowercase ( _lowercase ): def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : Dict = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(UpperCamelCase__ , """width_multiplier""" ) ) class _lowercase : def __init__( self: str , UpperCamelCase__: Optional[int] , UpperCamelCase__: str=13 , UpperCamelCase__: Any=64 , UpperCamelCase__: Optional[Any]=2 , UpperCamelCase__: str=3 , UpperCamelCase__: List[str]="swish" , UpperCamelCase__: Any=3 , UpperCamelCase__: Optional[int]=32 , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: int=0.02 , UpperCamelCase__: Dict=True , UpperCamelCase__: Dict=True , UpperCamelCase__: Any=10 , UpperCamelCase__: int=None , UpperCamelCase__: List[Any]=0.25 , UpperCamelCase__: str=0.0 , UpperCamelCase__: Optional[int]=0.0 , ): lowerCamelCase__ : Any = parent lowerCamelCase__ : Optional[Any] = batch_size lowerCamelCase__ : Optional[int] = image_size lowerCamelCase__ : str = patch_size lowerCamelCase__ : Optional[int] = num_channels lowerCamelCase__ : Optional[Any] = make_divisible(512 * width_multiplier , divisor=8 ) lowerCamelCase__ : List[str] = hidden_act 
lowerCamelCase__ : Any = conv_kernel_size lowerCamelCase__ : Any = output_stride lowerCamelCase__ : Union[str, Any] = classifier_dropout_prob lowerCamelCase__ : List[str] = use_labels lowerCamelCase__ : Optional[Any] = is_training lowerCamelCase__ : List[str] = num_labels lowerCamelCase__ : Dict = initializer_range lowerCamelCase__ : List[Any] = scope lowerCamelCase__ : Tuple = width_multiplier lowerCamelCase__ : List[Any] = ffn_dropout lowerCamelCase__ : Any = attn_dropout def lowerCamelCase_ ( self: Dict ): lowerCamelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ : Tuple = None lowerCamelCase__ : Optional[Any] = None if self.use_labels: lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_labels ) lowerCamelCase__ : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) lowerCamelCase__ : Union[str, Any] = self.get_config() return config, pixel_values, labels, pixel_labels def lowerCamelCase_ ( self: List[Any] ): return MobileViTVaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , ) def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: int , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] ): lowerCamelCase__ : Union[str, Any] = MobileViTVaModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : str = model(UpperCamelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // 
self.output_stride, ) , ) def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Tuple ): lowerCamelCase__ : Tuple = self.num_labels lowerCamelCase__ : Dict = MobileViTVaForImageClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : int = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Any , UpperCamelCase__: Optional[Any] , UpperCamelCase__: str ): lowerCamelCase__ : List[str] = self.num_labels lowerCamelCase__ : Union[str, Any] = MobileViTVaForSemanticSegmentation(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : Tuple = model(UpperCamelCase__ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ : Any = self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = config_and_inputs lowerCamelCase__ : Optional[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _lowercase ( _lowercase , _lowercase , unittest.TestCase ): a = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) a = ( { """feature-extraction""": MobileViTVaModel, """image-classification""": MobileViTVaForImageClassification, """image-segmentation""": 
MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) a = False a = False a = False a = False def lowerCamelCase_ ( self: Optional[int] ): lowerCamelCase__ : Tuple = MobileViTVaModelTester(self ) lowerCamelCase__ : List[str] = MobileViTVaConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ ) def lowerCamelCase_ ( self: Tuple ): self.config_tester.run_common_tests() @unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" ) def lowerCamelCase_ ( self: int ): pass @unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" ) def lowerCamelCase_ ( self: List[str] ): pass @unittest.skip(reason="""MobileViTV2 does not output attentions""" ) def lowerCamelCase_ ( self: Union[str, Any] ): pass @require_torch_multi_gpu @unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" ) def lowerCamelCase_ ( self: int ): pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowerCamelCase_ ( self: Tuple ): pass def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ ) lowerCamelCase__ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ : Tuple = [*signature.parameters.keys()] lowerCamelCase__ : str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def lowerCamelCase_ ( self: str ): lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def lowerCamelCase_ ( self: List[str] ): def check_hidden_states_output(UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any] ): lowerCamelCase__ : 
List[Any] = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): lowerCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) lowerCamelCase__ : Optional[int] = outputs.hidden_states lowerCamelCase__ : List[Any] = 5 self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. lowerCamelCase__ : int = 2 for i in range(len(UpperCamelCase__ ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) lowerCamelCase__ , lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : int = True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase__ : str = True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Dict ): lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase__ ) @slow def lowerCamelCase_ ( self: Union[str, Any] ): for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : Union[str, Any] = MobileViTVaModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE_ () -> Optional[int]: lowerCamelCase__ : str = 
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class _lowercase ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self: Tuple ): return ( MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ) if is_vision_available() else None ) @slow def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : Optional[Any] = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to( UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = self.default_image_processor lowerCamelCase__ : List[Any] = prepare_img() lowerCamelCase__ : Any = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): lowerCamelCase__ : int = model(**UpperCamelCase__ ) # verify the logits lowerCamelCase__ : str = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) lowerCamelCase__ : Optional[int] = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) ) @slow def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : int = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) lowerCamelCase__ : Optional[Any] = model.to(UpperCamelCase__ ) lowerCamelCase__ : Any = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) lowerCamelCase__ : Union[str, Any] = prepare_img() lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): lowerCamelCase__ : Optional[Any] = model(**UpperCamelCase__ ) lowerCamelCase__ : str = outputs.logits # verify the logits lowerCamelCase__ : List[str] = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , UpperCamelCase__ ) lowerCamelCase__ : Any = 
torch.tensor( [ [[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]], [[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]], [[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]], ] , device=UpperCamelCase__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1e-4 ) ) @slow def lowerCamelCase_ ( self: Optional[int] ): lowerCamelCase__ : Optional[Any] = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) lowerCamelCase__ : List[Any] = model.to(UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) lowerCamelCase__ : Optional[Any] = prepare_img() lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): lowerCamelCase__ : Dict = model(**UpperCamelCase__ ) lowerCamelCase__ : List[str] = outputs.logits.detach().cpu() lowerCamelCase__ : List[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(50, 60)] ) lowerCamelCase__ : int = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , UpperCamelCase__ ) lowerCamelCase__ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ ) lowerCamelCase__ : int = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
631
"""Check whether a graph is bipartite using a DFS 2-coloring."""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.


def check_bipartite_dfs(graph: dict) -> bool:
    """Return True if ``graph`` (adjacency-list dict keyed 0..n-1) is bipartite.

    Strategy: greedily 2-color every connected component with DFS, then verify
    that no edge joins two vertices of the same color.
    """
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v: int, c: int) -> None:
        # Color vertex v with c and propagate the opposite color to neighbours.
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    # The graph may be disconnected: start a DFS in every unvisited component.
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    # Verify the coloring: any edge inside one color class breaks bipartiteness.
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}

if __name__ == "__main__":
    print(check_bipartite_dfs(graph))
631
1
"""Tests for ``datasets.filesystems`` (fsspec registration, compression FS, HfFileSystem)."""
import importlib
import os

import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry

from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem

from .utils import require_lz4, require_zstandard


def test_mockfs(mockfs):
    # The mockfs fixture registers the "mock" protocol for the test's duration.
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    # Without the fixture, "mock" must not leak into the global registry.
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_extract_path_from_uri():
    """Remote URIs lose their scheme; local paths pass through unchanged."""
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False


@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    """Each compression filesystem exposes the decompressed member as a single file."""
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        # Optional compression backends may be missing; skip with the decorator's reason.
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    # The exposed member is the archive's basename without the compression suffix.
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()


@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    """Chained URLs (member::archive) resolve members as files."""
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)


@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()


def test_fs_overwrites():
    """Re-registering an existing protocol must emit exactly one warning on reload."""
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
631
"""Dataset/iterator helpers used by transformers pipelines to stream batched inference."""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset

from ..utils.generic import ModelOutput


class PipelineDataset(Dataset):
    """Map-style dataset that applies ``process`` (with fixed ``params``) to every item."""

    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed


class PipelineIterator(IterableDataset):
    """Iterate ``loader``, apply ``infer`` to each element, and optionally unroll batches.

    When ``loader_batch_size`` is set, each inferred batch is unrolled into
    batch-size-1 items so downstream code always sees single examples.
    """

    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        """Return the item at ``_loader_batch_index`` within ``_loader_batch_data``."""
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed


class PipelineChunkIterator(PipelineIterator):
    """Flattens an iterator of generators (one generator per input item) into one stream."""

    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # Subiterator None means we haven't started yet.
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start lookig at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed


class PipelinePackIterator(PipelineIterator):
    """Regroups flattened items back into their original ``process`` boundaries."""

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # its a `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator


class KeyDataset(Dataset):
    """View over ``dataset`` that yields ``item[key]`` for each element."""

    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    """View over ``dataset`` yielding text/text_pair dicts built from two keys."""

    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
631
1
"""Project Euler problem 114: count fillings of a row with red blocks of length >= 3."""


def solution(length: int = 50) -> int:
    """Return the number of ways a row of ``length`` units can be filled.

    Red blocks have minimum length three and must be separated by at least one
    black square. Dynamic programming: ``ways_number[k]`` counts the fillings
    of a row of length ``k``; the all-black row is the initial 1.
    """
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            # Place a block of block_length at every start position that leaves
            # at least one black separator before the remaining sub-row.
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            # The single placement flush against the end of the row.
            ways_number[row_length] += 1

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
631
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import platform
import sys


# Silence TensorFlow's C++ logging before any TF-dependent import can run.
# NOTE(review): restored from the stray '3' constant + otherwise-unused `import os`.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
    # Torch is optional for this report; record its absence instead of failing.
    print("Torch version:", None)

try:
    import transformers

    print("transformers version:", transformers.__version__)
except ImportError:
    print("transformers version:", None)
631
1
"""Lazy-import structure for the XGLM model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps submodule name -> public names; filled in below according to which
# optional backends are installed.
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Direct imports so static type checkers see the real symbols.
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    # Install the lazy module in place of this one so imports resolve on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
631
"""Lazy-import structure for the TrOCR model."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


# Maps submodule name -> public names; modeling entries are added only when
# torch is installed.
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    # Install the lazy module in place of this one so imports resolve on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
631
1
"""Convert ViT-MSN checkpoints from the original repository to the HF format."""
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD


torch.set_grad_enabled(False)


def create_rename_keys(config, base_model=False):
    """Build (old_name, new_name) pairs mapping original MSN keys to HF ViT keys."""
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    """Split the fused qkv projection of each block into separate q/k/v entries.

    NOTE(review): the target key names follow the standard HF ViT conversion
    layout (attention.attention.{query,key,value}); the original targets were
    lost in the corrupted source — verify against modeling_vit_msn.
    """
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    """Drop the classification head weights (not used by the base model)."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def remove_projection_head(state_dict):
    """Drop the self-supervised projection head used only during MSN pre-training."""
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    """Move the value stored under ``old`` to ``new`` in-place."""
    val = dct.pop(old)
    dct[new] = val


def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Download an MSN checkpoint, convert it to a ViTMSNModel, verify, and save."""
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id, filename), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}

    # Architecture hyper-parameters are encoded in the checkpoint URL.
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
631
"""Audio Spectrogram Transformer model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    """Configuration for an Audio Spectrogram Transformer model.

    Defaults reproduce the AST base architecture (12 layers, hidden size 768);
    `frequency_stride`/`time_stride` control patch overlap on the spectrogram,
    `max_length` and `num_mel_bins` define the input spectrogram dimensions.
    """

    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1_024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
631
1
'''simple docstring'''
import inspect
import unittest

from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
    from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


# NOTE(review): identifiers in this module look machine-obfuscated — every local
# is named `lowerCamelCase__` (each assignment rebinds the same name) and later
# code references names such as `self.batch_size`, `config`, `model` that are
# never bound here. The structure matches the upstream ConvNext test suite;
# confirm against the original before relying on runtime behavior.
class _lowercase :
    # Helper that builds ConvNext configs/inputs and runs per-model shape checks.
    def __init__( self: Optional[int] , UpperCamelCase__: Optional[int] , UpperCamelCase__: str=13 , UpperCamelCase__: List[str]=32 , UpperCamelCase__: Dict=3 , UpperCamelCase__: str=4 , UpperCamelCase__: Optional[Any]=[10, 20, 30, 40] , UpperCamelCase__: Optional[Any]=[2, 2, 3, 2] , UpperCamelCase__: Optional[Any]=True , UpperCamelCase__: Optional[Any]=True , UpperCamelCase__: Optional[int]=37 , UpperCamelCase__: Optional[int]="gelu" , UpperCamelCase__: Tuple=10 , UpperCamelCase__: Union[str, Any]=0.02 , UpperCamelCase__: List[Any]=["stage2", "stage3", "stage4"] , UpperCamelCase__: Any=[2, 3, 4] , UpperCamelCase__: Optional[Any]=None , ):
        # presumably each line below was `self.<attr> = <arg>` upstream — TODO confirm
        lowerCamelCase__ : int = parent
        lowerCamelCase__ : Union[str, Any] = batch_size
        lowerCamelCase__ : Any = image_size
        lowerCamelCase__ : str = num_channels
        lowerCamelCase__ : Any = num_stages
        lowerCamelCase__ : List[Any] = hidden_sizes
        lowerCamelCase__ : Union[str, Any] = depths
        lowerCamelCase__ : Dict = is_training
        lowerCamelCase__ : Any = use_labels
        lowerCamelCase__ : int = intermediate_size
        lowerCamelCase__ : Optional[Any] = hidden_act
        lowerCamelCase__ : Dict = num_labels
        lowerCamelCase__ : Dict = initializer_range
        lowerCamelCase__ : List[Any] = out_features
        lowerCamelCase__ : Dict = out_indices
        lowerCamelCase__ : int = scope

    # Builds (config, pixel_values, labels) for the checks below.
    def lowerCamelCase_ ( self: List[str] ):
        lowerCamelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCamelCase__ : int = None
        if self.use_labels:
            lowerCamelCase__ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
        lowerCamelCase__ : List[Any] = self.get_config()
        return config, pixel_values, labels

    # Returns a ConvNextConfig assembled from the tester's attributes.
    def lowerCamelCase_ ( self: str ):
        return ConvNextConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )

    # Runs ConvNextModel forward and checks the last-hidden-state shape.
    def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: List[str] , UpperCamelCase__: int ):
        lowerCamelCase__ : List[str] = ConvNextModel(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : Union[str, Any] = model(UpperCamelCase__ )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    # Runs the classification head and checks the logits shape.
    def lowerCamelCase_ ( self: str , UpperCamelCase__: Optional[int] , UpperCamelCase__: Any , UpperCamelCase__: Union[str, Any] ):
        lowerCamelCase__ : Union[str, Any] = ConvNextForImageClassification(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    # Checks the backbone both with explicit out_features and with out_features=None.
    def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: int , UpperCamelCase__: int ):
        lowerCamelCase__ : Tuple = ConvNextBackbone(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : str = model(UpperCamelCase__ )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        lowerCamelCase__ : Optional[Any] = None
        lowerCamelCase__ : Optional[Any] = ConvNextBackbone(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : List[Any] = model(UpperCamelCase__ )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )

    # Repackages prepare_config_and_inputs() output as (config, inputs_dict).
    def lowerCamelCase_ ( self: str ):
        lowerCamelCase__ : Any = self.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict = config_and_inputs
        lowerCamelCase__ : List[str] = {"""pixel_values""": pixel_values}
        return config, inputs_dict


@require_torch
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
    # NOTE(review): all class attributes are named `a` (each rebinds the previous);
    # upstream these were all_model_classes, pipeline_model_mapping, fx_compatible,
    # and the test_* switches — TODO confirm.
    a = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    a = (
        {"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )
    a = True
    a = False
    a = False
    a = False
    a = False

    # setUp: builds the model tester and a ConfigTester for ConvNextConfig.
    def lowerCamelCase_ ( self: Union[str, Any] ):
        lowerCamelCase__ : int = ConvNextModelTester(self )
        lowerCamelCase__ : List[Any] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )

    # Runs the full battery of common config serialization/initialization checks.
    def lowerCamelCase_ ( self: Union[str, Any] ):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def lowerCamelCase_ ( self: Tuple ):
        return

    @unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
    def lowerCamelCase_ ( self: List[str] ):
        pass

    @unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
    def lowerCamelCase_ ( self: Tuple ):
        pass

    @unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
    def lowerCamelCase_ ( self: Optional[Any] ):
        pass

    # Asserts the first positional forward() argument is `pixel_values` for every model class.
    def lowerCamelCase_ ( self: Dict ):
        lowerCamelCase__ , lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : int = model_class(UpperCamelCase__ )
            lowerCamelCase__ : Dict = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase__ : int = [*signature.parameters.keys()]
            lowerCamelCase__ : Tuple = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , UpperCamelCase__ )

    def lowerCamelCase_ ( self: Dict ):
        lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*UpperCamelCase__ )

    # Checks hidden_states count and spatial size, via argument and via config.
    def lowerCamelCase_ ( self: Optional[int] ):
        def check_hidden_states_output(UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[int] ):
            lowerCamelCase__ : List[str] = model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                lowerCamelCase__ : Dict = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            lowerCamelCase__ : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            lowerCamelCase__ : List[str] = self.model_tester.num_stages
            self.assertEqual(len(UpperCamelCase__ ) , expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        lowerCamelCase__ , lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : List[str] = True
            check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase__ : Any = True
            check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )

    def lowerCamelCase_ ( self: Optional[Any] ):
        lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )

    # Smoke-tests from_pretrained on the first published checkpoint.
    @slow
    def lowerCamelCase_ ( self: List[Any] ):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ : List[str] = ConvNextModel.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )


# Loads the shared COCO fixture image used by the integration test below.
def SCREAMING_SNAKE_CASE_ () -> List[str]:
    lowerCamelCase__ : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image


@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
    @cached_property
    def lowerCamelCase_ ( self: List[str] ):
        return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None

    # End-to-end check: real checkpoint, real image, pinned logit values.
    @slow
    def lowerCamelCase_ ( self: Optional[Any] ):
        lowerCamelCase__ : Union[str, Any] = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(UpperCamelCase__ )
        lowerCamelCase__ : Dict = self.default_image_processor
        lowerCamelCase__ : int = prepare_img()
        lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
        # forward pass
        with torch.no_grad():
            lowerCamelCase__ : List[str] = model(**UpperCamelCase__ )
        # verify the logits
        lowerCamelCase__ : List[Any] = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
        lowerCamelCase__ : Dict = torch.tensor([-0.0_260, -0.4_739, 0.1_911] ).to(UpperCamelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )


@require_torch
class _lowercase ( unittest.TestCase , _lowercase ):
    # Backbone-specific test class; attributes again collapsed to `a` by obfuscation.
    a = (ConvNextBackbone,) if is_torch_available() else ()
    a = ConvNextConfig
    a = False

    def lowerCamelCase_ ( self: Optional[Any] ):
        lowerCamelCase__ : Any = ConvNextModelTester(self )
631
'''simple docstring'''
import argparse
import os
import re

import packaging.version


# Path walked when patching `check_min_version` calls in example scripts.
PATH_TO_EXAMPLES = "examples/"
# pattern-name -> (compiled regex, replacement template with a VERSION placeholder)
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
# pattern-name -> file that pattern applies to
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Rewrite the version in `fname` using the named REPLACE_PATTERNS entry."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the pinned minimum version in every example .py file."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version everywhere; examples are skipped for patch releases."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace `main` doc links by stable-release doc links in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version out of the package __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Ask for the release version, then stamp it across the repo."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Ask for the next dev version, then stamp it across the repo."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
631
1
'''simple docstring'''
from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """
    Return the two roots of a*x^2 + b*x + c = 0.

    Real roots are returned as plain floats; complex roots are returned as
    complex numbers.

    :param a: quadratic coefficient, must be non-zero
    :param b: linear coefficient
    :param c: constant term
    :raises ValueError: if ``a`` is zero (the equation is not quadratic)
    """
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    # cmath.sqrt handles a negative discriminant by returning a complex value.
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main() -> None:
    """Demonstrate the solver on 5x^2 + 6x + 1 = 0."""
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
631
'''simple docstring'''
import unittest

import torch

from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow


_A : Union[str, Any] =False


# NOTE(review): locals are obfuscated — each `lowerCamelCase__` assignment rebinds
# the same name, while later lines read `model`, `optimizer`, `loss`, etc. that are
# never bound here. Structure matches a scheduler-equivalence training test
# (identical batches trained once with DDPM noise, once with DDIM noise, losses
# compared) — confirm against upstream before trusting runtime behavior.
class _lowercase ( unittest.TestCase ):
    # Builds a small deterministic UNet + SGD optimizer pair (seeded).
    def lowerCamelCase_ ( self: Dict , UpperCamelCase__: str=32 ):
        set_seed(0 )
        lowerCamelCase__ : Optional[int] = UNetaDModel(sample_size=UpperCamelCase__ , in_channels=3 , out_channels=3 )
        lowerCamelCase__ : List[Any] = torch.optim.SGD(model.parameters() , lr=0.0_001 )
        return model, optimizer

    @slow
    def lowerCamelCase_ ( self: List[str] ):
        lowerCamelCase__ : Optional[Any] = """cpu"""  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        lowerCamelCase__ : List[Any] = DDPMScheduler(
            num_train_timesteps=1_000 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=UpperCamelCase__ , )
        lowerCamelCase__ : Any = DDIMScheduler(
            num_train_timesteps=1_000 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=UpperCamelCase__ , )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0 )
        lowerCamelCase__ : str = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(UpperCamelCase__ ) for _ in range(4 )]
        lowerCamelCase__ : Tuple = [torch.randn((4, 3, 32, 32) ).to(UpperCamelCase__ ) for _ in range(4 )]
        lowerCamelCase__ : Tuple = [torch.randint(0 , 1_000 , (4,) ).long().to(UpperCamelCase__ ) for _ in range(4 )]
        # train with a DDPM scheduler
        lowerCamelCase__ , lowerCamelCase__ : Any = self.get_model_optimizer(resolution=32 )
        model.train().to(UpperCamelCase__ )
        for i in range(4 ):
            optimizer.zero_grad()
            lowerCamelCase__ : str = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            lowerCamelCase__ : str = model(UpperCamelCase__ , timesteps[i] ).sample
            lowerCamelCase__ : Tuple = torch.nn.functional.mse_loss(UpperCamelCase__ , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.get_model_optimizer(resolution=32 )
        model.train().to(UpperCamelCase__ )
        for i in range(4 ):
            optimizer.zero_grad()
            lowerCamelCase__ : Optional[Any] = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            lowerCamelCase__ : Dict = model(UpperCamelCase__ , timesteps[i] ).sample
            lowerCamelCase__ : Union[str, Any] = torch.nn.functional.mse_loss(UpperCamelCase__ , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        # Both training paths should produce (numerically) identical losses.
        self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) )
        self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) )
631
1
'''simple docstring'''
from math import log2


def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> int:
    """
    Return the zero-based index of the lowest set bit of a non-negative integer
    (0 is returned for input 0).

    `n & -n` isolates the lowest set bit; log2 of that power of two is its index.

    :param UpperCamelCase: non-negative integer
    :raises TypeError: if the input is not an ``int``
    :raises ValueError: if the input is negative
    """
    # Type check must come first: comparing a non-number with `< 0` would
    # itself raise, and booleans are ints but not meaningful input here.
    if isinstance(UpperCamelCase, bool) or not isinstance(UpperCamelCase, int):
        raise TypeError("""Input value must be a 'int' type""")
    if UpperCamelCase < 0:
        raise ValueError("""Input value must be a positive integer""")
    return 0 if (UpperCamelCase == 0) else int(log2(UpperCamelCase & -UpperCamelCase))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
631
'''simple docstring'''
from statistics import mean

import numpy as np


def calculate_turn_around_time(process_name: list, arrival_time: list, burst_time: list, no_of_process: int) -> list:
    """
    Compute per-process turn-around times under Highest Response Ratio Next
    (HRRN) scheduling: at each step the ready process with the highest
    (burst + waiting) / burst ratio runs to completion.

    Note: ``arrival_time`` is sorted in place and ``burst_time``/``process_name``
    are re-ordered to match, mirroring the original implementation.

    :param process_name: process labels
    :param arrival_time: arrival time per process
    :param burst_time: CPU burst per process
    :param no_of_process: number of processes
    :return: turn-around time per process (in arrival order)
    """
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # 0 = not yet run, 1 = finished
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # First unfinished process in arrival order.
        i = 0
        while finished_process[i] == 1:
            i += 1
        # If the CPU is idle, jump forward to that process's arrival.
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0  # best response ratio seen this round
        loc = 0  # index of the process selected to run
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(process_name: list, turn_around_time: list, burst_time: list, no_of_process: int) -> list:
    """Waiting time = turn-around time - burst time, per process."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(process_name, arrival_time, burst_time, no_of_process)
    waiting_time = calculate_waiting_time(process_name, turn_around_time, burst_time, no_of_process)
    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )
    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
631
1
'''simple docstring'''
import os
from datetime import datetime as dt

from github import Github


# Issues carrying any of these labels are never auto-closed or auto-staled.
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main():
    """
    Stale-bot pass over open huggingface/diffusers issues:
    - close issues the bot already pinged that stayed inactive for 7+ days,
    - un-stale issues a human commented on,
    - post a stale notice on issues inactive for 23+ days.

    Requires a GITHUB_TOKEN environment variable with repo access.
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Most recent comment first.
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
631
'''simple docstring'''
# Conditional re-export: the VQ Diffusion pipeline requires both the `torch`
# and `transformers` backends, so its symbols are only exposed when both are
# installed.
from ...utils import is_torch_available, is_transformers_available


if is_transformers_available() and is_torch_available():
    from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
631
1
'''simple docstring'''
from __future__ import annotations

import math


def ucal(u: float, p: int) -> float:
    """
    Product term of Newton's forward-difference formula:
    u * (u - 1) * (u - 2) * ... * (u - p + 1).

    :param u: normalized interpolation point (value - x0) / h
    :param p: order of the term (p >= 1; p == 1 returns u unchanged)
    """
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    """Interactively interpolate a value via Newton's forward differences."""
    n = int(input("enter the numbers of values: "))
    # y holds the forward-difference table; column 0 is the sampled values.
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(0)

    print("enter the values of parameters in a list: ")
    x = list(map(float, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    # Normalized offset from the first sample; assumes x is equally spaced.
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
631
'''simple docstring''' import inspect import math import tempfile import unittest import numpy as np from transformers import ViTMAEConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMAEForPreTraining, ViTMAEModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _lowercase : def __init__( self: List[str] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[str]=13 , UpperCamelCase__: Optional[int]=30 , UpperCamelCase__: Optional[int]=2 , UpperCamelCase__: List[str]=3 , UpperCamelCase__: List[str]=True , UpperCamelCase__: Any=True , UpperCamelCase__: List[Any]=32 , UpperCamelCase__: Any=5 , UpperCamelCase__: Optional[Any]=4 , UpperCamelCase__: Dict=37 , UpperCamelCase__: List[Any]="gelu" , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: List[Any]=10 , UpperCamelCase__: Tuple=0.02 , UpperCamelCase__: Optional[int]=3 , UpperCamelCase__: Dict=0.6 , UpperCamelCase__: int=None , ): lowerCamelCase__ : Dict = parent lowerCamelCase__ : Optional[Any] = batch_size lowerCamelCase__ : Optional[int] = image_size lowerCamelCase__ : Optional[Any] = patch_size lowerCamelCase__ : Any = num_channels lowerCamelCase__ : Any = is_training lowerCamelCase__ : Union[str, Any] = use_labels lowerCamelCase__ : List[str] = hidden_size lowerCamelCase__ : List[str] = num_hidden_layers lowerCamelCase__ : List[Any] = num_attention_heads lowerCamelCase__ : str = intermediate_size lowerCamelCase__ : str = 
hidden_act lowerCamelCase__ : Any = hidden_dropout_prob lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob lowerCamelCase__ : List[Any] = type_sequence_label_size lowerCamelCase__ : int = initializer_range lowerCamelCase__ : List[str] = mask_ratio lowerCamelCase__ : List[Any] = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) lowerCamelCase__ : str = (image_size // patch_size) ** 2 lowerCamelCase__ : Optional[int] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def lowerCamelCase_ ( self: Optional[int] ): lowerCamelCase__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ : Optional[int] = None if self.use_labels: lowerCamelCase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : Any = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self: str ): return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[int] , UpperCamelCase__: int ): lowerCamelCase__ : Tuple = ViTMAEModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : List[Any] = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase_ ( self: str , UpperCamelCase__: Union[str, 
Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Dict ): lowerCamelCase__ : int = ViTMAEForPreTraining(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ ) lowerCamelCase__ : Any = (self.image_size // self.patch_size) ** 2 lowerCamelCase__ : str = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images lowerCamelCase__ : Dict = 1 lowerCamelCase__ : Optional[int] = ViTMAEForPreTraining(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ ) lowerCamelCase__ : Tuple = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : Optional[int] = self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any = config_and_inputs lowerCamelCase__ : Optional[int] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _lowercase ( _lowercase , _lowercase , unittest.TestCase ): a = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else () a = {"""feature-extraction""": ViTMAEModel} if is_torch_available() else {} a = False a = False a = False a = False def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : Tuple = ViTMAEModelTester(self ) lowerCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 ) def lowerCamelCase_ ( self: Union[str, Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" ) def lowerCamelCase_ ( self: Dict ): pass def lowerCamelCase_ ( self: List[str] ): lowerCamelCase__ , lowerCamelCase__ : str = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : str = model_class(UpperCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCamelCase__ : Optional[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) ) def lowerCamelCase_ ( self: List[Any] ): lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Any = model_class(UpperCamelCase__ ) lowerCamelCase__ : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ : Any = [*signature.parameters.keys()] lowerCamelCase__ : List[str] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Dict , UpperCamelCase__: Optional[int] ): # make masks reproducible np.random.seed(2 ) lowerCamelCase__ : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 ) lowerCamelCase__ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowerCamelCase__ : Tuple = torch.from_numpy(UpperCamelCase__ ) # Add `noise` argument. 
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument lowerCamelCase__ : Tuple = pt_noise super().check_pt_tf_models(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def lowerCamelCase_ ( self: str ): lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): lowerCamelCase__ : Optional[int] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) lowerCamelCase__ : Optional[int] = outputs[0].cpu().numpy() lowerCamelCase__ : List[str] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase__ ) lowerCamelCase__ : List[str] = model_class.from_pretrained(UpperCamelCase__ ) model.to(UpperCamelCase__ ) # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): lowerCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) # Make sure we don't have nans lowerCamelCase__ : Dict = after_outputs[0].cpu().numpy() lowerCamelCase__ : Tuple = 0 lowerCamelCase__ : Union[str, Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(UpperCamelCase__ , 1e-5 ) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def lowerCamelCase_ ( self: int ): pass @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def lowerCamelCase_ ( self: Any ): pass @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. 
See test_save_load to get deterministic results.""" ) def lowerCamelCase_ ( self: List[str] ): pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" ) def lowerCamelCase_ ( self: Tuple ): pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowerCamelCase_ ( self: Optional[int] ): pass @slow def lowerCamelCase_ ( self: List[str] ): for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : Tuple = ViTMAEModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE_ () -> List[Any]: lowerCamelCase__ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class _lowercase ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self: List[str] ): return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None @slow def lowerCamelCase_ ( self: Tuple ): # make random mask reproducible across the PT and TF model np.random.seed(2 ) lowerCamelCase__ : str = ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(UpperCamelCase__ ) lowerCamelCase__ : Tuple = self.default_image_processor lowerCamelCase__ : List[str] = prepare_img() lowerCamelCase__ : int = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) lowerCamelCase__ : List[str] = ViTMAEConfig() lowerCamelCase__ : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) lowerCamelCase__ : Any = np.random.uniform(size=(1, num_patches) ) # forward pass with torch.no_grad(): lowerCamelCase__ : List[Any] = model(**UpperCamelCase__ , noise=torch.from_numpy(UpperCamelCase__ ).to(device=UpperCamelCase__ ) 
) # verify the logits lowerCamelCase__ : List[str] = torch.Size((1, 196, 768) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) lowerCamelCase__ : str = torch.tensor( [[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(UpperCamelCase__ ) , atol=1e-4 ) )
631
1
'''simple docstring''' import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _lowercase ( _lowercase ): a = ["""image_processor""", """tokenizer"""] a = """ChineseCLIPImageProcessor""" a = ("""BertTokenizer""", """BertTokenizerFast""") def __init__( self: Tuple , UpperCamelCase__: int=None , UpperCamelCase__: Dict=None , **UpperCamelCase__: Dict ): lowerCamelCase__ : Union[str, Any] = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , UpperCamelCase__ , ) lowerCamelCase__ : List[str] = kwargs.pop("""feature_extractor""" ) lowerCamelCase__ : Union[str, Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : List[Any] = self.image_processor def __call__( self: Any , UpperCamelCase__: str=None , UpperCamelCase__: int=None , UpperCamelCase__: Optional[Any]=None , **UpperCamelCase__: int ): if text is None and images is None: raise ValueError("""You have to specify either text or images. 
Both cannot be none.""" ) if text is not None: lowerCamelCase__ : Dict = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ ) if images is not None: lowerCamelCase__ : Union[str, Any] = self.image_processor(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ ) if text is not None and images is not None: lowerCamelCase__ : Union[str, Any] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCamelCase__ ) , tensor_type=UpperCamelCase__ ) def lowerCamelCase_ ( self: Dict , *UpperCamelCase__: Any , **UpperCamelCase__: Optional[Any] ): return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ ) def lowerCamelCase_ ( self: Any , *UpperCamelCase__: Dict , **UpperCamelCase__: Union[str, Any] ): return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ ) @property def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : str = self.tokenizer.model_input_names lowerCamelCase__ : List[Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def lowerCamelCase_ ( self: List[str] ): warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , UpperCamelCase__ , ) return self.image_processor_class
631
'''simple docstring''' import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class _lowercase ( _lowercase ): a = """""" a = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) a = None # compression type in fsspec. ex: "gzip" a = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self: str , UpperCamelCase__: str = "" , UpperCamelCase__: Optional[str] = None , UpperCamelCase__: Optional[dict] = None , **UpperCamelCase__: List[Any] ): super().__init__(self , **UpperCamelCase__ ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode lowerCamelCase__ : List[Any] = fsspec.open( UpperCamelCase__ , mode="""rb""" , protocol=UpperCamelCase__ , compression=self.compression , client_kwargs={ """requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459 """trust_env""": True, # Enable reading proxy env variables. **(target_options or {}).pop("""client_kwargs""" , {} ), # To avoid issues if it was already passed. 
} , **(target_options or {}) , ) lowerCamelCase__ : str = os.path.basename(self.file.path.split("""::""" )[0] ) lowerCamelCase__ : Union[str, Any] = ( self.compressed_name[: self.compressed_name.rindex(""".""" )] if """.""" in self.compressed_name else self.compressed_name ) lowerCamelCase__ : Tuple = None @classmethod def lowerCamelCase_ ( cls: Optional[int] , UpperCamelCase__: Optional[int] ): # compressed file paths are always relative to the archive root return super()._strip_protocol(UpperCamelCase__ ).lstrip("""/""" ) def lowerCamelCase_ ( self: Tuple ): if self.dir_cache is None: lowerCamelCase__ : Dict = {**self.file.fs.info(self.file.path ), """name""": self.uncompressed_name} lowerCamelCase__ : int = {f["""name"""]: f} def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: str ): return self.file.open().read() def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: str , UpperCamelCase__: str = "rb" , UpperCamelCase__: Optional[int]=None , UpperCamelCase__: Tuple=True , UpperCamelCase__: Tuple=None , **UpperCamelCase__: Optional[Any] , ): lowerCamelCase__ : Union[str, Any] = self._strip_protocol(UpperCamelCase__ ) if mode != "rb": raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' ) return self.file.open() class _lowercase ( _lowercase ): a = """bz2""" a = """bz2""" a = """.bz2""" class _lowercase ( _lowercase ): a = """gzip""" a = """gzip""" a = """.gz""" class _lowercase ( _lowercase ): a = """lz4""" a = """lz4""" a = """.lz4""" class _lowercase ( _lowercase ): a = """xz""" a = """xz""" a = """.xz""" class _lowercase ( _lowercase ): a = """zstd""" a = """zstd""" a = """.zst""" def __init__( self: int , UpperCamelCase__: str , UpperCamelCase__: str = "rb" , UpperCamelCase__: Optional[str] = None , UpperCamelCase__: Optional[dict] = None , UpperCamelCase__: int = DEFAULT_BLOCK_SIZE , **UpperCamelCase__: Dict , ): super().__init__( fo=UpperCamelCase__ , mode=UpperCamelCase__ , 
target_protocol=UpperCamelCase__ , target_options=UpperCamelCase__ , block_size=UpperCamelCase__ , **UpperCamelCase__ , ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 lowerCamelCase__ : Tuple = self.file.__enter__ class _lowercase : def __init__( self: Optional[int] , UpperCamelCase__: Any ): lowerCamelCase__ : Optional[int] = file_ def __enter__( self: List[Any] ): self._file.__enter__() return self def __exit__( self: Any , *UpperCamelCase__: str , **UpperCamelCase__: Any ): self._file.__exit__(*UpperCamelCase__ , **UpperCamelCase__ ) def __iter__( self: Any ): return iter(self._file ) def lowerCamelCase_ ( self: List[Any] ): return next(self._file ) def __getattr__( self: List[str] , UpperCamelCase__: Dict ): return getattr(self._file , UpperCamelCase__ ) def fixed_enter(*UpperCamelCase__: Union[str, Any] , **UpperCamelCase__: List[str] ): return WrappedFile(_enter(*UpperCamelCase__ , **UpperCamelCase__ ) ) lowerCamelCase__ : Optional[Any] = fixed_enter
631
1
'''simple docstring''' import argparse import os from pathlib import Path import torch from bark.generation import _load_model as _bark_load_model from huggingface_hub import hf_hub_download from transformers import EncodecConfig, EncodecModel, set_seed from transformers.models.bark.configuration_bark import ( BarkCoarseConfig, BarkConfig, BarkFineConfig, BarkSemanticConfig, ) from transformers.models.bark.generation_configuration_bark import ( BarkCoarseGenerationConfig, BarkFineGenerationConfig, BarkGenerationConfig, BarkSemanticGenerationConfig, ) from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel from transformers.utils import logging logging.set_verbosity_info() _A : Union[str, Any] =logging.get_logger(__name__) set_seed(770) _A : Optional[Any] ={ '''c_attn''': '''att_proj''', '''c_proj''': '''out_proj''', '''c_fc''': '''in_proj''', '''transformer.''': '''''', '''h.''': '''layers.''', '''ln_1''': '''layernorm_1''', '''ln_2''': '''layernorm_2''', '''ln_f''': '''layernorm_final''', '''wpe''': '''position_embeds_layer''', '''wte''': '''input_embeds_layer''', } _A : Union[str, Any] ={ '''text_small''': { '''repo_id''': '''suno/bark''', '''file_name''': '''text.pt''', }, '''coarse_small''': { '''repo_id''': '''suno/bark''', '''file_name''': '''coarse.pt''', }, '''fine_small''': { '''repo_id''': '''suno/bark''', '''file_name''': '''fine.pt''', }, '''text''': { '''repo_id''': '''suno/bark''', '''file_name''': '''text_2.pt''', }, '''coarse''': { '''repo_id''': '''suno/bark''', '''file_name''': '''coarse_2.pt''', }, '''fine''': { '''repo_id''': '''suno/bark''', '''file_name''': '''fine_2.pt''', }, } _A : Any =os.path.dirname(os.path.abspath(__file__)) _A : Tuple =os.path.join(os.path.expanduser('''~'''), '''.cache''') _A : Dict =os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''') def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase=False ) -> Optional[Any]: 
lowerCamelCase__ : List[str] = model_type if use_small: key += "_small" return os.path.join(UpperCamelCase , REMOTE_MODEL_PATHS[key]["""file_name"""] ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Union[str, Any]: os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase ) hf_hub_download(repo_id=UpperCamelCase , filename=UpperCamelCase , local_dir=UpperCamelCase ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase=False , UpperCamelCase="text" ) -> Union[str, Any]: if model_type == "text": lowerCamelCase__ : Any = BarkSemanticModel lowerCamelCase__ : Union[str, Any] = BarkSemanticConfig lowerCamelCase__ : List[Any] = BarkSemanticGenerationConfig elif model_type == "coarse": lowerCamelCase__ : List[Any] = BarkCoarseModel lowerCamelCase__ : Union[str, Any] = BarkCoarseConfig lowerCamelCase__ : Dict = BarkCoarseGenerationConfig elif model_type == "fine": lowerCamelCase__ : int = BarkFineModel lowerCamelCase__ : Optional[int] = BarkFineConfig lowerCamelCase__ : Dict = BarkFineGenerationConfig else: raise NotImplementedError() lowerCamelCase__ : Dict = f'''{model_type}_small''' if use_small else model_type lowerCamelCase__ : Any = REMOTE_MODEL_PATHS[model_key] if not os.path.exists(UpperCamelCase ): logger.info(f'''{model_type} model not found, downloading into `{CACHE_DIR}`.''' ) _download(model_info["""repo_id"""] , model_info["""file_name"""] ) lowerCamelCase__ : int = torch.load(UpperCamelCase , map_location=UpperCamelCase ) # this is a hack lowerCamelCase__ : Any = checkpoint["""model_args"""] if "input_vocab_size" not in model_args: lowerCamelCase__ : Union[str, Any] = model_args["""vocab_size"""] lowerCamelCase__ : Any = model_args["""vocab_size"""] del model_args["vocab_size"] # convert Bark model arguments to HF Bark model arguments lowerCamelCase__ : Optional[Any] = model_args.pop("""n_head""" ) lowerCamelCase__ : List[Any] = model_args.pop("""n_embd""" ) lowerCamelCase__ : Tuple = model_args.pop("""n_layer""" ) 
lowerCamelCase__ : str = ConfigClass(**checkpoint["""model_args"""] ) lowerCamelCase__ : Dict = ModelClass(config=UpperCamelCase ) lowerCamelCase__ : Dict = GenerationConfigClass() lowerCamelCase__ : Dict = model_generation_config lowerCamelCase__ : List[Any] = checkpoint["""model"""] # fixup checkpoint lowerCamelCase__ : str = """_orig_mod.""" for k, v in list(state_dict.items() ): if k.startswith(UpperCamelCase ): # replace part of the key with corresponding layer name in HF implementation lowerCamelCase__ : Any = k[len(UpperCamelCase ) :] for old_layer_name in new_layer_name_dict: lowerCamelCase__ : int = new_k.replace(UpperCamelCase , new_layer_name_dict[old_layer_name] ) lowerCamelCase__ : List[str] = state_dict.pop(UpperCamelCase ) lowerCamelCase__ : str = set(state_dict.keys() ) - set(model.state_dict().keys() ) lowerCamelCase__ : str = {k for k in extra_keys if not k.endswith(""".attn.bias""" )} lowerCamelCase__ : Optional[int] = set(model.state_dict().keys() ) - set(state_dict.keys() ) lowerCamelCase__ : str = {k for k in missing_keys if not k.endswith(""".attn.bias""" )} if len(UpperCamelCase ) != 0: raise ValueError(f'''extra keys found: {extra_keys}''' ) if len(UpperCamelCase ) != 0: raise ValueError(f'''missing keys: {missing_keys}''' ) model.load_state_dict(UpperCamelCase , strict=UpperCamelCase ) lowerCamelCase__ : Any = model.num_parameters(exclude_embeddings=UpperCamelCase ) lowerCamelCase__ : List[Any] = checkpoint["""best_val_loss"""].item() logger.info(f'''model loaded: {round(n_params/1E6 , 1 )}M params, {round(UpperCamelCase , 3 )} loss''' ) model.eval() model.to(UpperCamelCase ) del checkpoint, state_dict return model def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase=False , UpperCamelCase="text" ) -> Any: if model_type not in ("text", "coarse", "fine"): raise NotImplementedError() lowerCamelCase__ : List[str] = """cpu""" # do conversion on cpu lowerCamelCase__ : Optional[int] = _get_ckpt_path(UpperCamelCase , 
use_small=UpperCamelCase ) lowerCamelCase__ : Any = _load_model(UpperCamelCase , UpperCamelCase , model_type=UpperCamelCase , use_small=UpperCamelCase ) # load bark initial model lowerCamelCase__ : List[str] = _bark_load_model(UpperCamelCase , """cpu""" , model_type=UpperCamelCase , use_small=UpperCamelCase ) if model_type == "text": lowerCamelCase__ : Tuple = bark_model["""model"""] if model.num_parameters(exclude_embeddings=UpperCamelCase ) != bark_model.get_num_params(): raise ValueError("""initial and new models don't have the same number of parameters""" ) # check if same output as the bark model lowerCamelCase__ : Union[str, Any] = 5 lowerCamelCase__ : List[Any] = 10 if model_type in ["text", "coarse"]: lowerCamelCase__ : Optional[int] = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int ) lowerCamelCase__ : Tuple = bark_model(UpperCamelCase )[0] lowerCamelCase__ : List[str] = model(UpperCamelCase ) # take last logits lowerCamelCase__ : List[str] = output_new_model_total.logits[:, [-1], :] else: lowerCamelCase__ : Dict = 3 lowerCamelCase__ : Optional[int] = 8 lowerCamelCase__ : Any = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int ) lowerCamelCase__ : Dict = model(UpperCamelCase , UpperCamelCase ) lowerCamelCase__ : List[str] = bark_model(UpperCamelCase , UpperCamelCase ) lowerCamelCase__ : Optional[Any] = output_new_model_total.logits # output difference should come from the difference of self-attention implementation design if output_new_model.shape != output_old_model.shape: raise ValueError("""initial and new outputs don't have the same shape""" ) if (output_new_model - output_old_model).abs().max().item() > 1E-3: raise ValueError("""initial and new outputs are not equal""" ) Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase ) model.save_pretrained(UpperCamelCase ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ) -> Dict: 
lowerCamelCase__ : List[str] = os.path.join(UpperCamelCase , UpperCamelCase ) lowerCamelCase__ : int = BarkSemanticConfig.from_pretrained(os.path.join(UpperCamelCase , """config.json""" ) ) lowerCamelCase__ : Union[str, Any] = BarkCoarseConfig.from_pretrained(os.path.join(UpperCamelCase , """config.json""" ) ) lowerCamelCase__ : int = BarkFineConfig.from_pretrained(os.path.join(UpperCamelCase , """config.json""" ) ) lowerCamelCase__ : Tuple = EncodecConfig.from_pretrained("""facebook/encodec_24khz""" ) lowerCamelCase__ : Any = BarkSemanticModel.from_pretrained(UpperCamelCase ) lowerCamelCase__ : Dict = BarkCoarseModel.from_pretrained(UpperCamelCase ) lowerCamelCase__ : str = BarkFineModel.from_pretrained(UpperCamelCase ) lowerCamelCase__ : Optional[Any] = EncodecModel.from_pretrained("""facebook/encodec_24khz""" ) lowerCamelCase__ : int = BarkConfig.from_sub_model_configs( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) lowerCamelCase__ : Tuple = BarkGenerationConfig.from_sub_model_configs( semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config ) lowerCamelCase__ : Any = BarkModel(UpperCamelCase ) lowerCamelCase__ : List[Any] = semantic lowerCamelCase__ : List[Any] = coarseAcoustic lowerCamelCase__ : str = fineAcoustic lowerCamelCase__ : List[str] = codec lowerCamelCase__ : Tuple = bark_generation_config Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase ) bark.save_pretrained(UpperCamelCase , repo_id=UpperCamelCase , push_to_hub=UpperCamelCase ) if __name__ == "__main__": _A : List[Any] =argparse.ArgumentParser() # Required parameters parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''') parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''') _A : Dict =parser.parse_args() 
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
631
'''simple docstring''' import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() _A : int =logging.get_logger(__name__) def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> str: print("""Loading config file...""" ) def flatten_yaml_as_dict(UpperCamelCase , UpperCamelCase="" , UpperCamelCase="." ): lowerCamelCase__ : Optional[int] = [] for k, v in d.items(): lowerCamelCase__ : Optional[int] = parent_key + sep + k if parent_key else k if isinstance(UpperCamelCase , collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(UpperCamelCase , UpperCamelCase , sep=UpperCamelCase ).items() ) else: items.append((new_key, v) ) return dict(UpperCamelCase ) lowerCamelCase__ : Any = argparse.Namespace() with open(UpperCamelCase , """r""" ) as yaml_file: try: lowerCamelCase__ : int = yaml.load(UpperCamelCase , Loader=yaml.FullLoader ) lowerCamelCase__ : Tuple = flatten_yaml_as_dict(UpperCamelCase ) for k, v in flat_cfg.items(): setattr(UpperCamelCase , UpperCamelCase , UpperCamelCase ) except yaml.YAMLError as exc: logger.error("""Error while loading config file: {}. 
Error message: {}""".format(UpperCamelCase , str(UpperCamelCase ) ) ) return config def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Optional[Any]: lowerCamelCase__ : Union[str, Any] = MobileViTVaConfig() lowerCamelCase__ : str = False # dataset if task_name.startswith("""imagenet1k_""" ): lowerCamelCase__ : Optional[Any] = 1000 if int(task_name.strip().split("""_""" )[-1] ) == 384: lowerCamelCase__ : int = 384 else: lowerCamelCase__ : Optional[int] = 256 lowerCamelCase__ : str = """imagenet-1k-id2label.json""" elif task_name.startswith("""imagenet21k_to_1k_""" ): lowerCamelCase__ : Tuple = 21000 if int(task_name.strip().split("""_""" )[-1] ) == 384: lowerCamelCase__ : str = 384 else: lowerCamelCase__ : Any = 256 lowerCamelCase__ : int = """imagenet-22k-id2label.json""" elif task_name.startswith("""ade20k_""" ): lowerCamelCase__ : Dict = 151 lowerCamelCase__ : str = 512 lowerCamelCase__ : List[Any] = """ade20k-id2label.json""" lowerCamelCase__ : Union[str, Any] = True elif task_name.startswith("""voc_""" ): lowerCamelCase__ : Tuple = 21 lowerCamelCase__ : Optional[int] = 512 lowerCamelCase__ : List[Any] = """pascal-voc-id2label.json""" lowerCamelCase__ : Tuple = True # orig_config lowerCamelCase__ : Optional[int] = load_orig_config_file(UpperCamelCase ) assert getattr(UpperCamelCase , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model" lowerCamelCase__ : int = getattr(UpperCamelCase , """model.classification.mitv2.width_multiplier""" , 1.0 ) assert ( getattr(UpperCamelCase , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" lowerCamelCase__ : Tuple = getattr(UpperCamelCase , """model.classification.activation.name""" , """swish""" ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: lowerCamelCase__ : Any = getattr(UpperCamelCase , """model.segmentation.output_stride""" , 16 ) if "_deeplabv3" 
in task_name: lowerCamelCase__ : str = getattr(UpperCamelCase , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] ) lowerCamelCase__ : Tuple = getattr(UpperCamelCase , """model.segmentation.deeplabv3.aspp_out_channels""" , 512 ) lowerCamelCase__ : List[Any] = getattr(UpperCamelCase , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 ) # id2label lowerCamelCase__ : Tuple = """huggingface/label-files""" lowerCamelCase__ : Optional[Any] = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type="""dataset""" ) , """r""" ) ) lowerCamelCase__ : Union[str, Any] = {int(UpperCamelCase ): v for k, v in idalabel.items()} lowerCamelCase__ : int = idalabel lowerCamelCase__ : Optional[Any] = {v: k for k, v in idalabel.items()} return config def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Any: lowerCamelCase__ : List[Any] = dct.pop(UpperCamelCase ) lowerCamelCase__ : Dict = val def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase=False ) -> Tuple: if base_model: lowerCamelCase__ : Optional[int] = """""" else: lowerCamelCase__ : Optional[Any] = """mobilevitv2.""" lowerCamelCase__ : List[str] = [] for k in state_dict.keys(): if k[:8] == "encoder.": lowerCamelCase__ : Optional[Any] = k[8:] else: lowerCamelCase__ : Optional[Any] = k if ".block." in k: lowerCamelCase__ : Dict = k_new.replace(""".block.""" , """.""" ) if ".conv." in k: lowerCamelCase__ : List[Any] = k_new.replace(""".conv.""" , """.convolution.""" ) if ".norm." in k: lowerCamelCase__ : str = k_new.replace(""".norm.""" , """.normalization.""" ) if "conv_1." in k: lowerCamelCase__ : Any = k_new.replace("""conv_1.""" , f'''{model_prefix}conv_stem.''' ) for i in [1, 2]: if f'''layer_{i}.''' in k: lowerCamelCase__ : Optional[Any] = k_new.replace(f'''layer_{i}.''' , f'''{model_prefix}encoder.layer.{i-1}.layer.''' ) if ".exp_1x1." in k: lowerCamelCase__ : Dict = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" ) if ".red_1x1." 
in k: lowerCamelCase__ : str = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" ) for i in [3, 4, 5]: if f'''layer_{i}.0.''' in k: lowerCamelCase__ : List[str] = k_new.replace(f'''layer_{i}.0.''' , f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' ) if f'''layer_{i}.1.local_rep.0.''' in k: lowerCamelCase__ : Optional[Any] = k_new.replace(f'''layer_{i}.1.local_rep.0.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' ) if f'''layer_{i}.1.local_rep.1.''' in k: lowerCamelCase__ : Dict = k_new.replace(f'''layer_{i}.1.local_rep.1.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' ) for i in [3, 4, 5]: if i == 3: lowerCamelCase__ : int = [0, 1] elif i == 4: lowerCamelCase__ : str = [0, 1, 2, 3] elif i == 5: lowerCamelCase__ : Dict = [0, 1, 2] for j in j_in: if f'''layer_{i}.1.global_rep.{j}.''' in k: lowerCamelCase__ : List[Any] = k_new.replace( f'''layer_{i}.1.global_rep.{j}.''' , f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' ) if f'''layer_{i}.1.global_rep.{j+1}.''' in k: lowerCamelCase__ : Optional[int] = k_new.replace( f'''layer_{i}.1.global_rep.{j+1}.''' , f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' ) if f'''layer_{i}.1.conv_proj.''' in k: lowerCamelCase__ : Optional[int] = k_new.replace(f'''layer_{i}.1.conv_proj.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' ) if "pre_norm_attn.0." in k: lowerCamelCase__ : str = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" ) if "pre_norm_attn.1." in k: lowerCamelCase__ : str = k_new.replace("""pre_norm_attn.1.""" , """attention.""" ) if "pre_norm_ffn.0." in k: lowerCamelCase__ : Optional[Any] = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" ) if "pre_norm_ffn.1." in k: lowerCamelCase__ : Union[str, Any] = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" ) if "pre_norm_ffn.3." in k: lowerCamelCase__ : List[Any] = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" ) if "classifier.1." 
in k: lowerCamelCase__ : Tuple = k_new.replace("""classifier.1.""" , """classifier.""" ) if "seg_head." in k: lowerCamelCase__ : Optional[int] = k_new.replace("""seg_head.""" , """segmentation_head.""" ) if ".aspp_layer." in k: lowerCamelCase__ : Any = k_new.replace(""".aspp_layer.""" , """.""" ) if ".aspp_pool." in k: lowerCamelCase__ : Any = k_new.replace(""".aspp_pool.""" , """.""" ) rename_keys.append((k, k_new) ) return rename_keys def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> List[str]: lowerCamelCase__ : Union[str, Any] = [] for k in state_dict.keys(): if k.startswith("""seg_head.aux_head.""" ): keys_to_ignore.append(UpperCamelCase ) for k in keys_to_ignore: state_dict.pop(UpperCamelCase , UpperCamelCase ) def SCREAMING_SNAKE_CASE_ () -> Dict: lowerCamelCase__ : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" lowerCamelCase__ : Tuple = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw ) return im @torch.no_grad() def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict: lowerCamelCase__ : str = get_mobilevitva_config(UpperCamelCase , UpperCamelCase ) # load original state_dict lowerCamelCase__ : List[str] = torch.load(UpperCamelCase , map_location="""cpu""" ) # load huggingface model if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ): lowerCamelCase__ : int = MobileViTVaForSemanticSegmentation(UpperCamelCase ).eval() lowerCamelCase__ : Tuple = False else: lowerCamelCase__ : int = MobileViTVaForImageClassification(UpperCamelCase ).eval() lowerCamelCase__ : Optional[Any] = False # remove and rename some keys of load the original model lowerCamelCase__ : Tuple = checkpoint remove_unused_keys(UpperCamelCase ) lowerCamelCase__ : Union[str, Any] = create_rename_keys(UpperCamelCase , base_model=UpperCamelCase ) for rename_key_src, 
rename_key_dest in rename_keys: rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase ) # load modified state_dict model.load_state_dict(UpperCamelCase ) # Check outputs on an image, prepared by MobileViTImageProcessor lowerCamelCase__ : int = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) lowerCamelCase__ : Dict = image_processor(images=prepare_img() , return_tensors="""pt""" ) lowerCamelCase__ : str = model(**UpperCamelCase ) # verify classification model if task_name.startswith("""imagenet""" ): lowerCamelCase__ : Dict = outputs.logits lowerCamelCase__ : Optional[Any] = logits.argmax(-1 ).item() print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] ) if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0: # expected_logits for base variant lowerCamelCase__ : Optional[Any] = torch.tensor([-1.63_36E00, -7.32_04E-02, -5.18_83E-01] ) assert torch.allclose(logits[0, :3] , UpperCamelCase , atol=1E-4 ) Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase ) print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(UpperCamelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(UpperCamelCase ) if __name__ == "__main__": _A : Optional[int] =argparse.ArgumentParser() # Required parameters parser.add_argument( '''--task''', default='''imagenet1k_256''', type=str, help=( '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . 
''' ''' Classification (ImageNet-1k) - MobileViTV2 (256x256) : imagenet1k_256 - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384 - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) : imagenet21k_to_1k_256 - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on ImageNet-1k 384x384) : imagenet21k_to_1k_384 Segmentation - ADE20K Dataset : ade20k_deeplabv3 - Pascal VOC 2012 Dataset: voc_deeplabv3 ''' ), choices=[ '''imagenet1k_256''', '''imagenet1k_384''', '''imagenet21k_to_1k_256''', '''imagenet21k_to_1k_384''', '''ade20k_deeplabv3''', '''voc_deeplabv3''', ], ) parser.add_argument( '''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).''' ) parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) _A : Dict =parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
631
1
"""Mask2Former model configuration."""

import copy
from typing import Dict, List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING

MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)


class _lowercase(PretrainedConfig):
    """Configuration for a Mask2Former universal-segmentation model.

    NOTE(review): the obfuscated original inherited from itself, clobbered
    its three class attributes under one name ``a``, and reduced every
    ``self.attr = value`` to a no-op local assignment.  Names are restored
    from the parameter names the surviving expressions still reference and
    from the upstream Mask2FormerConfig contract — confirm against the
    canonical source before relying on exact attribute names.
    """

    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1_024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2_048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12_544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        # Mutable default kept for interface compatibility with upstream.
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        # Allow passing a plain dict; re-hydrate it into a config object.
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        # Decoder depth doubles as the generic `num_hidden_layers` attribute.
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a Mask2Former config from a pre-trained backbone config."""
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        """Serialize to a python dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
631
'''simple docstring''' import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class _lowercase ( _lowercase ): def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : Dict = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(UpperCamelCase__ , """width_multiplier""" ) ) class _lowercase : def __init__( self: str , UpperCamelCase__: Optional[int] , UpperCamelCase__: str=13 , UpperCamelCase__: Any=64 , UpperCamelCase__: Optional[Any]=2 , UpperCamelCase__: str=3 , UpperCamelCase__: List[str]="swish" , UpperCamelCase__: Any=3 , UpperCamelCase__: Optional[int]=32 , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: int=0.02 , UpperCamelCase__: Dict=True , UpperCamelCase__: Dict=True , UpperCamelCase__: Any=10 , UpperCamelCase__: int=None , UpperCamelCase__: List[Any]=0.25 , UpperCamelCase__: str=0.0 , UpperCamelCase__: Optional[int]=0.0 , ): lowerCamelCase__ : Any = parent lowerCamelCase__ : Optional[Any] = batch_size lowerCamelCase__ : Optional[int] = image_size lowerCamelCase__ : str = patch_size lowerCamelCase__ : Optional[int] = num_channels lowerCamelCase__ : Optional[Any] = make_divisible(512 * width_multiplier , divisor=8 ) lowerCamelCase__ : List[str] = hidden_act 
lowerCamelCase__ : Any = conv_kernel_size lowerCamelCase__ : Any = output_stride lowerCamelCase__ : Union[str, Any] = classifier_dropout_prob lowerCamelCase__ : List[str] = use_labels lowerCamelCase__ : Optional[Any] = is_training lowerCamelCase__ : List[str] = num_labels lowerCamelCase__ : Dict = initializer_range lowerCamelCase__ : List[Any] = scope lowerCamelCase__ : Tuple = width_multiplier lowerCamelCase__ : List[Any] = ffn_dropout lowerCamelCase__ : Any = attn_dropout def lowerCamelCase_ ( self: Dict ): lowerCamelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ : Tuple = None lowerCamelCase__ : Optional[Any] = None if self.use_labels: lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_labels ) lowerCamelCase__ : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) lowerCamelCase__ : Union[str, Any] = self.get_config() return config, pixel_values, labels, pixel_labels def lowerCamelCase_ ( self: List[Any] ): return MobileViTVaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , ) def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: int , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] ): lowerCamelCase__ : Union[str, Any] = MobileViTVaModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : str = model(UpperCamelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // 
self.output_stride, ) , ) def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Tuple ): lowerCamelCase__ : Tuple = self.num_labels lowerCamelCase__ : Dict = MobileViTVaForImageClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : int = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Any , UpperCamelCase__: Optional[Any] , UpperCamelCase__: str ): lowerCamelCase__ : List[str] = self.num_labels lowerCamelCase__ : Union[str, Any] = MobileViTVaForSemanticSegmentation(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : Tuple = model(UpperCamelCase__ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ : Any = self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = config_and_inputs lowerCamelCase__ : Optional[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _lowercase ( _lowercase , _lowercase , unittest.TestCase ): a = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) a = ( { """feature-extraction""": MobileViTVaModel, """image-classification""": MobileViTVaForImageClassification, """image-segmentation""": 
MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) a = False a = False a = False a = False def lowerCamelCase_ ( self: Optional[int] ): lowerCamelCase__ : Tuple = MobileViTVaModelTester(self ) lowerCamelCase__ : List[str] = MobileViTVaConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ ) def lowerCamelCase_ ( self: Tuple ): self.config_tester.run_common_tests() @unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" ) def lowerCamelCase_ ( self: int ): pass @unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" ) def lowerCamelCase_ ( self: List[str] ): pass @unittest.skip(reason="""MobileViTV2 does not output attentions""" ) def lowerCamelCase_ ( self: Union[str, Any] ): pass @require_torch_multi_gpu @unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" ) def lowerCamelCase_ ( self: int ): pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowerCamelCase_ ( self: Tuple ): pass def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ ) lowerCamelCase__ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ : Tuple = [*signature.parameters.keys()] lowerCamelCase__ : str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def lowerCamelCase_ ( self: str ): lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def lowerCamelCase_ ( self: List[str] ): def check_hidden_states_output(UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any] ): lowerCamelCase__ : 
List[Any] = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): lowerCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) lowerCamelCase__ : Optional[int] = outputs.hidden_states lowerCamelCase__ : List[Any] = 5 self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. lowerCamelCase__ : int = 2 for i in range(len(UpperCamelCase__ ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) lowerCamelCase__ , lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : int = True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase__ : str = True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Dict ): lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase__ ) @slow def lowerCamelCase_ ( self: Union[str, Any] ): for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : Union[str, Any] = MobileViTVaModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE_ () -> Optional[int]: lowerCamelCase__ : str = 
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class _lowercase ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self: Tuple ): return ( MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ) if is_vision_available() else None ) @slow def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : Optional[Any] = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to( UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = self.default_image_processor lowerCamelCase__ : List[Any] = prepare_img() lowerCamelCase__ : Any = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): lowerCamelCase__ : int = model(**UpperCamelCase__ ) # verify the logits lowerCamelCase__ : str = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) lowerCamelCase__ : Optional[int] = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) ) @slow def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : int = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) lowerCamelCase__ : Optional[Any] = model.to(UpperCamelCase__ ) lowerCamelCase__ : Any = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) lowerCamelCase__ : Union[str, Any] = prepare_img() lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): lowerCamelCase__ : Optional[Any] = model(**UpperCamelCase__ ) lowerCamelCase__ : str = outputs.logits # verify the logits lowerCamelCase__ : List[str] = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , UpperCamelCase__ ) lowerCamelCase__ : Any = 
torch.tensor( [ [[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]], [[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]], [[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]], ] , device=UpperCamelCase__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1e-4 ) ) @slow def lowerCamelCase_ ( self: Optional[int] ): lowerCamelCase__ : Optional[Any] = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) lowerCamelCase__ : List[Any] = model.to(UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) lowerCamelCase__ : Optional[Any] = prepare_img() lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): lowerCamelCase__ : Dict = model(**UpperCamelCase__ ) lowerCamelCase__ : List[str] = outputs.logits.detach().cpu() lowerCamelCase__ : List[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(50, 60)] ) lowerCamelCase__ : int = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , UpperCamelCase__ ) lowerCamelCase__ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ ) lowerCamelCase__ : int = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
631
1
"""Generate and print all subsequences of a sequence via backtracking."""

from typing import Any


def generate_all_subsequences(sequence: list[Any]) -> None:
    """Print every subsequence (the power set, as lists) of ``sequence``.

    Thin entry point that starts the backtracking walk with an empty
    partial subsequence at index 0.
    """
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(
    sequence: list[Any], current_subsequence: list[Any], index: int
) -> None:
    """Walk the binary state-space tree of include/exclude decisions.

    At each ``index`` we branch twice: first *without* the element, then
    *with* it.  When ``index`` reaches the end of ``sequence`` the
    accumulated partial subsequence is printed.
    """
    if index == len(sequence):
        print(current_subsequence)
        return

    # Branch 1: skip sequence[index].
    create_state_space_tree(sequence, current_subsequence, index + 1)
    # Branch 2: take sequence[index]; undo the choice when backtracking.
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
631
"""Fast (Rust-backed) tokenizer for GPT-NeoX-20B (byte-level BPE)."""

import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging

if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2_048,
}


class _lowercase(PreTrainedTokenizerFast):
    """GPT-NeoX-20B tokenizer backed by the `tokenizers` library.

    NOTE(review): in the obfuscated original the class inherited from
    itself, the class attributes were all bound to the same name ``a``
    (each clobbering the last), and both methods shared the name
    ``lowerCamelCase_`` (the second silently overriding the first).
    Names are restored per the standard fast-tokenizer contract that the
    surviving expressions (``pre_tok_state``, ``add_prefix_space``)
    still reference.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        # Rebuild the backend pre-tokenizer only when its serialized
        # `add_prefix_space` disagrees with the requested value.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend BPE model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Flatten a `Conversation` into ids for generation.

        Each turn is encoded without special tokens and followed by EOS;
        the result is left-truncated to `model_max_length`.
        """
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
631
1
"""Open the first Google result for a query in the default web browser."""

import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup  # was "bsa" — a nonexistent module that could never import
from fake_useragent import UserAgent

if __name__ == "__main__":
    # Query comes from the CLI args when given, otherwise from an interactive prompt.
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")

    url = f"https://www.google.com/search?q={query}&num=100"
    # A randomized User-Agent avoids Google's minimal no-JS fallback markup.
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )

    try:
        # Regular result layout: the first organic hit lives inside div.yuRUbf.
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        # Fallback layout (div.kCrYT): the anchor holds a Google redirect URL;
        # the real target sits in its "url" query parameter.
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]

    webbrowser.open(link)
631
"""Lazy-import scaffolding for the GIT model package.

The module exposes its public names through `_LazyModule`, importing each
submodule only on first attribute access.
"""

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Map of submodule name -> public symbols, consumed by _LazyModule below.
# (The original assigned this dict to a throwaway name while _LazyModule
# referenced the undefined `_import_structure`, a guaranteed NameError.)
_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch-only symbols are registered only when torch is importable.
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )
else:
    import sys

    # Replace this module object with the lazy proxy (restores the use of
    # the otherwise-dead `import sys`).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
631
1
"""ConvNeXt-V2 model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices

logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}


class _lowercase(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a ConvNeXt-V2 model.

    Holds the hyper-parameters needed to instantiate the model; defaults
    mirror the ``facebook/convnextv2-tiny-1k-224`` architecture.

    NOTE(review): the obfuscated original inherited the class from itself
    (a NameError) and assigned every hyper-parameter to a throwaway local;
    bases and ``self.`` attributes are restored from the imported symbols
    and the upstream ConvNextV2Config contract.
    """

    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        # Per-stage widths / depths default to the "tiny" variant.
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        # Validates/aligns the requested backbone output stages.
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
631
'''simple docstring''' import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin _A : int =get_tests_dir('''fixtures/test_sentencepiece.model''') _A : Tuple ={'''target_lang''': '''fi''', '''source_lang''': '''en'''} _A : int ='''>>zh<<''' _A : Dict ='''Helsinki-NLP/''' if is_torch_available(): _A : List[Any] ='''pt''' elif is_tf_available(): _A : Optional[int] ='''tf''' else: _A : Dict ='''jax''' @require_sentencepiece class _lowercase ( _lowercase , unittest.TestCase ): a = MarianTokenizer a = False a = True def lowerCamelCase_ ( self: List[str] ): super().setUp() lowerCamelCase__ : List[Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""] lowerCamelCase__ : Optional[Any] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) ) lowerCamelCase__ : Optional[int] = Path(self.tmpdirname ) save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] ) save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] ) copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] ) lowerCamelCase__ : Dict = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase_ ( self: Optional[Any] , **UpperCamelCase__: Any ): return MarianTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def lowerCamelCase_ ( self: str , 
UpperCamelCase__: List[str] ): return ( "This is a test", "This is a test", ) def lowerCamelCase_ ( self: Optional[Any] ): lowerCamelCase__ : Any = """</s>""" lowerCamelCase__ : List[Any] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ ) def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : int = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """</s>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """<pad>""" ) self.assertEqual(len(UpperCamelCase__ ) , 9 ) def lowerCamelCase_ ( self: int ): self.assertEqual(self.get_tokenizer().vocab_size , 9 ) def lowerCamelCase_ ( self: int ): lowerCamelCase__ : List[Any] = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' ) lowerCamelCase__ : Optional[int] = en_de_tokenizer(["""I am a small frog"""] , return_tensors=UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : Optional[Any] = [38, 121, 14, 697, 38_848, 0] self.assertListEqual(UpperCamelCase__ , batch.input_ids[0] ) lowerCamelCase__ : List[str] = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(UpperCamelCase__ ) lowerCamelCase__ : Tuple = [x.name for x in Path(UpperCamelCase__ ).glob("""*""" )] self.assertIn("""source.spm""" , UpperCamelCase__ ) MarianTokenizer.from_pretrained(UpperCamelCase__ ) def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ : List[Any] = self.get_tokenizer() lowerCamelCase__ : Any = tok( ["""I am a small frog""" * 1_000, """I am a small frog"""] , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(batch.input_ids.shape , (2, 512) ) def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : str = self.get_tokenizer() lowerCamelCase__ : 
Dict = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(batch_smaller.input_ids.shape , (2, 10) ) @slow def lowerCamelCase_ ( self: List[str] ): # fmt: off lowerCamelCase__ : int = {"""input_ids""": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 
58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , ) def lowerCamelCase_ ( self: List[str] ): lowerCamelCase__ : Union[str, Any] = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" ) lowerCamelCase__ : str = """Tämä on testi""" lowerCamelCase__ : Any = """This is a test""" lowerCamelCase__ : int = [76, 7, 2_047, 2] lowerCamelCase__ : List[str] = [69, 12, 11, 940, 2] 
lowerCamelCase__ : Tuple = tokenizer(UpperCamelCase__ ).input_ids self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = tokenizer(text_target=UpperCamelCase__ ).input_ids self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : Tuple = tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
631
1
"""Tests for the PyTorch Swinv2 model.

Restores working identifiers: the tester class must be named ``SwinvaModelTester``
(it is referenced by name in ``setUp``), the three classes need distinct names so
they don't shadow each other at module level, and ``__init__`` must assign to
``self`` attributes (the mangled version bound every value to a throwaway local,
leaving the tester with no state).
"""

import collections
import inspect
import unittest

from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
    from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class SwinvaModelTester:
    """Builds tiny Swinv2 configs and inputs and runs the shared model checks."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels); labels is None when use_labels is False."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return SwinvaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwinvaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # sequence length shrinks 4x per merge stage; hidden dim doubles per stage
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = SwinvaForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class SwinvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Shared-framework test suite for the Swinv2 model classes."""

    all_model_classes = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = SwinvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SwinvaConfig, embed_dim=37)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason="Swinv2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), expected_num_attentions)

            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        # pad the image so both dimensions are multiples of the patch size
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )


@require_vision
@require_torch
class SwinvaModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the published tiny Swinv2 checkpoint."""

    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3_947, -0.4_306, 0.0_026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
631
"""RWKV model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Model ids mapped to their hosted config.json files.
RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class RwkvConfig(PretrainedConfig):
    """Configuration class for a RWKV model.

    Args:
        vocab_size: Size of the token vocabulary.
        context_length: Maximum sequence length the model was trained with
            (exposed to callers as ``max_position_embeddings`` via ``attribute_map``).
        hidden_size: Dimensionality of the embeddings and hidden states.
        num_hidden_layers: Number of RWKV blocks.
        attention_hidden_size: Hidden size of the attention mixing; defaults to ``hidden_size``.
        intermediate_size: Hidden size of the feed-forward; defaults to ``4 * hidden_size``.
        layer_norm_epsilon: Epsilon used by the layer-normalization layers.
        bos_token_id / eos_token_id: Special token ids forwarded to the base config.
        rescale_every: Rescale residual streams every N layers at inference.
        tie_word_embeddings: Whether to tie input and output embeddings.
        use_cache: Whether the model returns its recurrent state for fast generation.
    """

    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50_277,
        context_length=1_024,
        hidden_size=4_096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Both sizes fall back to hidden_size-derived defaults when not given.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
631
1
"""SegFormer model configuration (plus its ONNX export config)."""

import warnings
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    """Configuration class for a SegFormer model.

    Per-stage parameters (``depths``, ``sr_ratios``, ``hidden_sizes``,
    ``patch_sizes``, ``strides``, ``num_attention_heads``, ``mlp_ratios``)
    are lists with one entry per encoder block.
    """

    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # reshape_last_stage=False is deprecated; warn but keep accepting it.
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    """ONNX export configuration for SegFormer."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes for the single image input.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
631
"""RoCBert model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    """Configuration class for a RoCBert model.

    In addition to the standard BERT-style parameters, RoCBert carries
    pronunciation and shape embeddings (``enable_pronunciation`` /
    ``enable_shape`` plus their embed dims and vocab sizes) and a
    ``concat_input`` flag controlling how the three embeddings are combined.
    """

    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
631
1
"""fsspec filesystems that expose a single compressed file as a one-file archive."""

import os
from typing import Optional

import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE


class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read-only filesystem wrapping a single compressed file.

    Subclasses set ``protocol``/``compression``/``extension`` for their format;
    the archive then contains exactly one entry, named after the compressed
    file with its extension stripped.
    """

    root_marker = ""
    protocol: str = None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        """Open ``fo`` lazily through fsspec with this class's compression.

        Args:
            fo: URL of the compressed file, possibly chained with ``::``.
            target_protocol: Protocol of the underlying filesystem.
            target_options: Options forwarded to the underlying filesystem.
        """
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        # Strip the final extension to get the name of the single archive member.
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        """Populate ``dir_cache`` with the single uncompressed entry."""
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        """Return the full decompressed contents as bytes."""
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    """Read contents of a BZ2 file as a filesystem with one file inside."""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a GZIP file as a filesystem with one file inside."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class LzaFileSystem(BaseCompressedFileFileSystem):
    """Read contents of an LZ4 file as a filesystem with one file inside."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    """Read contents of an XZ file as a filesystem with one file inside."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a Zstandard file as a filesystem with one file inside."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            """Delegates everything to the wrapped file but owns a writable ``close``."""

            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
631
"""Draw a Sierpinski triangle of a given recursion depth with turtle graphics.

Usage: python fractals.py <int:depth_for_fractal>
"""

import sys


def get_mid(pa: tuple[float, float], pb: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of the segment between points ``pa`` and ``pb``."""
    # Fix: the mangled version averaged pa with itself, returning pa unchanged.
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2


def triangle(
    vertexa: tuple[float, float],
    vertexb: tuple[float, float],
    vertexc: tuple[float, float],
    depth: int,
) -> None:
    """Draw one triangle outline, then recurse into its three corner sub-triangles.

    Recursion stops when ``depth`` reaches 0. Uses the module-level ``my_pen``
    turtle created in the ``__main__`` block.
    """
    my_pen.up()
    my_pen.goto(vertexa[0], vertexa[1])
    my_pen.down()
    my_pen.goto(vertexb[0], vertexb[1])
    my_pen.goto(vertexc[0], vertexc[1])
    my_pen.goto(vertexa[0], vertexa[1])

    if depth == 0:
        return

    triangle(vertexa, get_mid(vertexa, vertexb), get_mid(vertexa, vertexc), depth - 1)
    triangle(vertexb, get_mid(vertexb, vertexa), get_mid(vertexb, vertexc), depth - 1)
    triangle(vertexc, get_mid(vertexc, vertexa), get_mid(vertexc, vertexb), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    # Imported here so merely importing this module does not require tkinter.
    import turtle

    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")

    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
631
1
'''simple docstring''' import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class _lowercase ( unittest.TestCase ): a = JukeboxTokenizer a = { """artist""": """Zac Brown Band""", """genres""": """Country""", """lyrics""": """I met a traveller from an antique land, Who said \"Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away """, } @require_torch def lowerCamelCase_ ( self: str ): import torch lowerCamelCase__ : Optional[int] = JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" ) lowerCamelCase__ : Tuple = tokenizer(**self.metas )["""input_ids"""] # fmt: off lowerCamelCase__ : List[str] = [ torch.tensor([[ 0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 
35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 
34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 1_069, 11]] ), torch.tensor([[0, 0, 0, 1_069, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def lowerCamelCase_ ( self: Optional[Any] ): import torch lowerCamelCase__ : str = JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" ) lowerCamelCase__ : Tuple = tokenizer(**self.metas )["""input_ids"""] # fmt: off lowerCamelCase__ : Any = [ torch.tensor([[ 0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 
77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) 
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
631
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class _lowercase : def __init__( self: int , UpperCamelCase__: Dict , UpperCamelCase__: List[str]=13 , UpperCamelCase__: Union[str, Any]=7 , UpperCamelCase__: Union[str, Any]=True , UpperCamelCase__: List[Any]=True , UpperCamelCase__: Optional[Any]=True , UpperCamelCase__: int=True , UpperCamelCase__: List[Any]=99 , UpperCamelCase__: Tuple=32 , UpperCamelCase__: List[str]=2 , UpperCamelCase__: Optional[Any]=4 , UpperCamelCase__: Optional[int]=37 , UpperCamelCase__: Any="gelu" , UpperCamelCase__: Any=0.1 , UpperCamelCase__: int=0.1 , UpperCamelCase__: Optional[Any]=512 , UpperCamelCase__: List[str]=16 , UpperCamelCase__: Optional[int]=2 , UpperCamelCase__: Dict=0.02 , UpperCamelCase__: Tuple=3 , UpperCamelCase__: Optional[int]=4 , UpperCamelCase__: Union[str, Any]=None , ): lowerCamelCase__ : Dict = parent lowerCamelCase__ : Union[str, Any] = 13 lowerCamelCase__ : Any = 7 lowerCamelCase__ : int = True lowerCamelCase__ : Optional[Any] = True lowerCamelCase__ : Dict = True lowerCamelCase__ : List[str] = True lowerCamelCase__ : str = 99 lowerCamelCase__ : Dict = 384 lowerCamelCase__ : Optional[Any] = 2 lowerCamelCase__ : Optional[int] = 4 lowerCamelCase__ : Optional[Any] = 37 lowerCamelCase__ : Union[str, Any] = """gelu""" lowerCamelCase__ : int = 0.1 lowerCamelCase__ : Optional[Any] = 0.1 lowerCamelCase__ : List[Any] = 
512 lowerCamelCase__ : Optional[Any] = 16 lowerCamelCase__ : Any = 2 lowerCamelCase__ : Optional[Any] = 0.02 lowerCamelCase__ : int = 3 lowerCamelCase__ : List[str] = 4 lowerCamelCase__ : Any = 128 lowerCamelCase__ : List[Any] = 2 lowerCamelCase__ : Optional[Any] = 9 lowerCamelCase__ : Any = 1 lowerCamelCase__ : Optional[int] = None def lowerCamelCase_ ( self: List[Any] ): lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ : str = None if self.use_input_mask: lowerCamelCase__ : int = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase__ : List[str] = None if self.use_token_type_ids: lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase__ : int = None lowerCamelCase__ : Optional[Any] = None lowerCamelCase__ : Optional[Any] = None if self.use_labels: lowerCamelCase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase__ : Any = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ : List[Any] = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCamelCase__ , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: Dict , UpperCamelCase__: List[str] , UpperCamelCase__: Any , UpperCamelCase__: Any , UpperCamelCase__: Any , 
UpperCamelCase__: str , UpperCamelCase__: Any ): lowerCamelCase__ : List[Any] = TFConvBertModel(config=UpperCamelCase__ ) lowerCamelCase__ : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} lowerCamelCase__ : List[str] = [input_ids, input_mask] lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ ) lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase_ ( self: Any , UpperCamelCase__: Tuple , UpperCamelCase__: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple ): lowerCamelCase__ : int = TFConvBertForMaskedLM(config=UpperCamelCase__ ) lowerCamelCase__ : Tuple = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } lowerCamelCase__ : int = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase_ ( self: str , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Any , UpperCamelCase__: Any , UpperCamelCase__: Union[str, Any] ): lowerCamelCase__ : int = self.num_labels lowerCamelCase__ : Dict = TFConvBertForSequenceClassification(config=UpperCamelCase__ ) lowerCamelCase__ : Dict = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[str] , UpperCamelCase__: int , UpperCamelCase__: List[str] , 
UpperCamelCase__: Dict ): lowerCamelCase__ : Optional[int] = self.num_choices lowerCamelCase__ : Dict = TFConvBertForMultipleChoice(config=UpperCamelCase__ ) lowerCamelCase__ : int = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) ) lowerCamelCase__ : List[str] = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) ) lowerCamelCase__ : Any = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) ) lowerCamelCase__ : Tuple = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase_ ( self: str , UpperCamelCase__: Any , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: Any , UpperCamelCase__: Optional[int] , UpperCamelCase__: str , UpperCamelCase__: int ): lowerCamelCase__ : List[Any] = self.num_labels lowerCamelCase__ : List[str] = TFConvBertForTokenClassification(config=UpperCamelCase__ ) lowerCamelCase__ : Optional[Any] = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } lowerCamelCase__ : Tuple = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase_ ( self: Any , UpperCamelCase__: List[Any] , UpperCamelCase__: str , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any] ): lowerCamelCase__ : Optional[int] = TFConvBertForQuestionAnswering(config=UpperCamelCase__ ) lowerCamelCase__ : int = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ ) 
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase_ ( self: Optional[Any] ): lowerCamelCase__ : str = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) : str = config_and_inputs lowerCamelCase__ : Optional[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class _lowercase ( _lowercase , _lowercase , unittest.TestCase ): a = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) a = ( { """feature-extraction""": TFConvBertModel, """fill-mask""": TFConvBertForMaskedLM, """question-answering""": TFConvBertForQuestionAnswering, """text-classification""": TFConvBertForSequenceClassification, """token-classification""": TFConvBertForTokenClassification, """zero-shot""": TFConvBertForSequenceClassification, } if is_tf_available() else {} ) a = False a = False a = False def lowerCamelCase_ ( self: str ): lowerCamelCase__ : Dict = TFConvBertModelTester(self ) lowerCamelCase__ : Dict = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 ) def lowerCamelCase_ ( self: List[str] ): self.config_tester.run_common_tests() def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ : 
int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ ) def lowerCamelCase_ ( self: str ): lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ ) @slow def lowerCamelCase_ ( self: Optional[Any] ): lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ : Dict = True lowerCamelCase__ : Tuple = True if hasattr(UpperCamelCase__ , """use_cache""" ): lowerCamelCase__ : Union[str, Any] = True lowerCamelCase__ : List[str] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length ) lowerCamelCase__ : Tuple = getattr(self.model_tester , """key_length""" , UpperCamelCase__ ) for model_class in self.all_model_classes: lowerCamelCase__ : int = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ ) lowerCamelCase__ : Dict = len(model(UpperCamelCase__ ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase__ , saved_model=UpperCamelCase__ ) lowerCamelCase__ : int = os.path.join(UpperCamelCase__ , """saved_model""" , """1""" ) lowerCamelCase__ : List[Any] = tf.keras.models.load_model(UpperCamelCase__ ) lowerCamelCase__ : Any = model(UpperCamelCase__ ) if self.is_encoder_decoder: lowerCamelCase__ : Dict = outputs["""encoder_hidden_states"""] lowerCamelCase__ : Any = outputs["""encoder_attentions"""] else: lowerCamelCase__ : int = 
outputs["""hidden_states"""] lowerCamelCase__ : Optional[int] = outputs["""attentions"""] self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = getattr( self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def lowerCamelCase_ ( self: int ): lowerCamelCase__ : Union[str, Any] = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" ) self.assertIsNotNone(UpperCamelCase__ ) def lowerCamelCase_ ( self: List[str] ): lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ : Union[str, Any] = True lowerCamelCase__ : int = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length ) lowerCamelCase__ : Any = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length ) lowerCamelCase__ : Optional[int] = getattr(self.model_tester , """key_length""" , UpperCamelCase__ ) lowerCamelCase__ : List[Any] = getattr(self.model_tester , """key_length""" , UpperCamelCase__ ) def check_decoder_attentions_output(UpperCamelCase__: Union[str, Any] ): lowerCamelCase__ : List[Any] = len(UpperCamelCase__ ) self.assertEqual(out_len % 2 , 0 ) lowerCamelCase__ : Any = outputs.decoder_attentions self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def 
check_encoder_attentions_output(UpperCamelCase__: List[str] ): lowerCamelCase__ : Any = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: lowerCamelCase__ : int = True lowerCamelCase__ : Any = False lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ ) lowerCamelCase__ : List[str] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) lowerCamelCase__ : Optional[int] = len(UpperCamelCase__ ) self.assertEqual(config.output_hidden_states , UpperCamelCase__ ) check_encoder_attentions_output(UpperCamelCase__ ) if self.is_encoder_decoder: lowerCamelCase__ : str = model_class(UpperCamelCase__ ) lowerCamelCase__ : Tuple = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) self.assertEqual(config.output_hidden_states , UpperCamelCase__ ) check_decoder_attentions_output(UpperCamelCase__ ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] lowerCamelCase__ : Optional[int] = True lowerCamelCase__ : Dict = model_class(UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) self.assertEqual(config.output_hidden_states , UpperCamelCase__ ) check_encoder_attentions_output(UpperCamelCase__ ) # Check attention is always last and order is fine lowerCamelCase__ : List[Any] = True lowerCamelCase__ : int = True lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ ) lowerCamelCase__ : List[Any] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCamelCase__ ) ) 
self.assertEqual(model.config.output_hidden_states , UpperCamelCase__ ) check_encoder_attentions_output(UpperCamelCase__ ) @require_tf class _lowercase ( unittest.TestCase ): @slow def lowerCamelCase_ ( self: List[str] ): lowerCamelCase__ : Dict = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" ) lowerCamelCase__ : Optional[Any] = tf.constant([[0, 1, 2, 3, 4, 5]] ) lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ )[0] lowerCamelCase__ : Dict = [1, 6, 768] self.assertEqual(output.shape , UpperCamelCase__ ) lowerCamelCase__ : Dict = tf.constant( [ [ [-0.03_475_493, -0.4_686_034, -0.30_638_832], [0.22_637_248, -0.26_988_646, -0.7_423_424], [0.10_324_868, -0.45_013_508, -0.58_280_784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , UpperCamelCase__ , atol=1e-4 )
631
1
'''simple docstring''' import unittest from transformers import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device if is_torch_available(): import torch from transformers import AutoModelForImageClassification if is_vision_available(): from transformers import AutoImageProcessor @require_torch @require_vision class _lowercase ( unittest.TestCase ): @slow def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : str = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" ) lowerCamelCase__ : Optional[Any] = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" ) model.to(UpperCamelCase__ ) from datasets import load_dataset lowerCamelCase__ : Dict = load_dataset("""nielsr/rvlcdip-demo""" ) lowerCamelCase__ : List[str] = dataset["""train"""][0]["""image"""].convert("""RGB""" ) lowerCamelCase__ : Dict = image_processor(UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): lowerCamelCase__ : Optional[Any] = model(**UpperCamelCase__ ) lowerCamelCase__ : Any = outputs.logits lowerCamelCase__ : Optional[int] = torch.Size((1, 16) ) self.assertEqual(logits.shape , UpperCamelCase__ ) lowerCamelCase__ : Optional[Any] = torch.tensor( [-0.4_158, -0.4_092, -0.4_347] , device=UpperCamelCase__ , dtype=torch.float , ) self.assertTrue(torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
631
'''simple docstring''' _A : List[str] ='''0.21.0''' from .accelerator import Accelerator from .big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from .data_loader import skip_first_batches from .launchers import debug_launcher, notebook_launcher from .state import PartialState from .utils import ( DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states, ) if is_rich_available(): from .utils import rich
631
1
'''simple docstring''' import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _lowercase : def __init__( self: str , UpperCamelCase__: Tuple , UpperCamelCase__: List[str]=13 , UpperCamelCase__: Optional[int]=32 , UpperCamelCase__: Optional[int]=2 , UpperCamelCase__: Dict=3 , UpperCamelCase__: Optional[int]=16 , UpperCamelCase__: List[Any]=[32, 64, 128] , UpperCamelCase__: str=[1, 2, 1] , UpperCamelCase__: List[Any]=[2, 2, 4] , UpperCamelCase__: List[Any]=2 , UpperCamelCase__: Tuple=2.0 , UpperCamelCase__: List[Any]=True , UpperCamelCase__: Union[str, Any]=0.0 , UpperCamelCase__: List[Any]=0.0 , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: List[str]="gelu" , UpperCamelCase__: Dict=False , UpperCamelCase__: Optional[int]=True , UpperCamelCase__: List[Any]=0.02 , UpperCamelCase__: str=1e-5 , UpperCamelCase__: Any=True , UpperCamelCase__: List[str]=None , UpperCamelCase__: Union[str, Any]=True , UpperCamelCase__: List[str]=10 , UpperCamelCase__: Union[str, Any]=8 , UpperCamelCase__: List[str]=["stage1", "stage2"] , UpperCamelCase__: Union[str, Any]=[1, 2] , ): lowerCamelCase__ : Tuple = parent lowerCamelCase__ : 
Optional[int] = batch_size lowerCamelCase__ : Dict = image_size lowerCamelCase__ : Any = patch_size lowerCamelCase__ : Dict = num_channels lowerCamelCase__ : List[Any] = embed_dim lowerCamelCase__ : List[str] = hidden_sizes lowerCamelCase__ : Optional[Any] = depths lowerCamelCase__ : Dict = num_heads lowerCamelCase__ : Optional[Any] = window_size lowerCamelCase__ : str = mlp_ratio lowerCamelCase__ : Optional[Any] = qkv_bias lowerCamelCase__ : Optional[int] = hidden_dropout_prob lowerCamelCase__ : int = attention_probs_dropout_prob lowerCamelCase__ : Union[str, Any] = drop_path_rate lowerCamelCase__ : List[Any] = hidden_act lowerCamelCase__ : List[Any] = use_absolute_embeddings lowerCamelCase__ : Dict = patch_norm lowerCamelCase__ : Union[str, Any] = layer_norm_eps lowerCamelCase__ : Union[str, Any] = initializer_range lowerCamelCase__ : List[str] = is_training lowerCamelCase__ : Dict = scope lowerCamelCase__ : List[Any] = use_labels lowerCamelCase__ : List[str] = type_sequence_label_size lowerCamelCase__ : List[Any] = encoder_stride lowerCamelCase__ : Optional[int] = out_features lowerCamelCase__ : Optional[int] = out_indices def lowerCamelCase_ ( self: List[str] ): lowerCamelCase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ : Tuple = None if self.use_labels: lowerCamelCase__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : str = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self: Any ): return FocalNetConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , 
drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Dict , UpperCamelCase__: str ): lowerCamelCase__ : Any = FocalNetModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ ) lowerCamelCase__ : int = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowerCamelCase__ : Any = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def lowerCamelCase_ ( self: int , UpperCamelCase__: Optional[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Tuple ): lowerCamelCase__ : int = FocalNetBackbone(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : Tuple = model(UpperCamelCase__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] ) # verify backbone works with out_features=None lowerCamelCase__ : Union[str, Any] = None lowerCamelCase__ : List[str] = FocalNetBackbone(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : Tuple = model(UpperCamelCase__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , 
[self.batch_size, self.image_size * 2, 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Tuple , UpperCamelCase__: Any ): lowerCamelCase__ : List[Any] = FocalNetForMaskedImageModeling(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : Union[str, Any] = model(UpperCamelCase__ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCamelCase__ : Dict = 1 lowerCamelCase__ : Dict = FocalNetForMaskedImageModeling(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase__ : Any = model(UpperCamelCase__ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Dict , UpperCamelCase__: Dict ): lowerCamelCase__ : List[str] = self.type_sequence_label_size lowerCamelCase__ : int = FocalNetForImageClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : Union[str, Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCamelCase__ : Optional[Any] = 1 lowerCamelCase__ : Tuple = FocalNetForImageClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase__ : List[str] = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCamelCase_ 
( self: Optional[int] ): lowerCamelCase__ : List[Any] = self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = config_and_inputs lowerCamelCase__ : Optional[int] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _lowercase ( _lowercase , _lowercase , unittest.TestCase ): a = ( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) a = ( {"""feature-extraction""": FocalNetModel, """image-classification""": FocalNetForImageClassification} if is_torch_available() else {} ) a = False a = False a = False a = False a = False def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ : Optional[Any] = FocalNetModelTester(self ) lowerCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=UpperCamelCase__ , embed_dim=37 , has_text_modality=UpperCamelCase__ ) def lowerCamelCase_ ( self: Optional[int] ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase_ ( self: str ): return def lowerCamelCase_ ( self: Dict ): lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase__ ) def lowerCamelCase_ ( 
self: Any ): lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ ) @unittest.skip(reason="""FocalNet does not use inputs_embeds""" ) def lowerCamelCase_ ( self: List[Any] ): pass @unittest.skip(reason="""FocalNet does not use feedforward chunking""" ) def lowerCamelCase_ ( self: List[str] ): pass def lowerCamelCase_ ( self: str ): lowerCamelCase__ , lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: lowerCamelCase__ : Optional[Any] = model_class(UpperCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCamelCase__ : List[str] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) ) def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: lowerCamelCase__ : List[str] = model_class(UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ : List[str] = [*signature.parameters.keys()] lowerCamelCase__ : Optional[int] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def lowerCamelCase_ ( self: Any , UpperCamelCase__: Dict , UpperCamelCase__: List[str] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Any ): lowerCamelCase__ : Any = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): lowerCamelCase__ : Any = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) lowerCamelCase__ : str = outputs.hidden_states lowerCamelCase__ : int = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) 
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) # FocalNet has a different seq_length lowerCamelCase__ : int = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCamelCase__ : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) lowerCamelCase__ : List[str] = outputs.reshaped_hidden_states self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple = reshaped_hidden_states[0].shape lowerCamelCase__ : str = ( reshaped_hidden_states[0].view(UpperCamelCase__ , UpperCamelCase__ , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def lowerCamelCase_ ( self: List[Any] ): lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ : Optional[int] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: lowerCamelCase__ : Any = True self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase__ : Dict = True self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ , lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ : Tuple = 3 lowerCamelCase__ : List[str] = ( self.model_tester.image_size if 
isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowerCamelCase__ : Tuple = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCamelCase__ : List[str] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowerCamelCase__ : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: lowerCamelCase__ : Dict = True self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase__ : List[str] = True self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) ) @slow def lowerCamelCase_ ( self: Optional[Any] ): for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : Any = FocalNetModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def lowerCamelCase_ ( self: str ): lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ : List[Any] = _config_zero_init(UpperCamelCase__ ) for model_class in self.all_model_classes: lowerCamelCase__ : List[str] = model_class(config=UpperCamelCase__ ) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @require_vision @require_torch class _lowercase ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self: Any ): # TODO update organization return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else 
None @slow def lowerCamelCase_ ( self: int ): lowerCamelCase__ : Tuple = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(UpperCamelCase__ ) lowerCamelCase__ : List[str] = self.default_image_processor lowerCamelCase__ : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) lowerCamelCase__ : int = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): lowerCamelCase__ : str = model(**UpperCamelCase__ ) # verify the logits lowerCamelCase__ : Optional[int] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) lowerCamelCase__ : List[Any] = torch.tensor([0.2_166, -0.4_368, 0.2_191] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) ) self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 281 ) @require_torch class _lowercase ( _lowercase , unittest.TestCase ): a = (FocalNetBackbone,) if is_torch_available() else () a = FocalNetConfig a = False def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ : Any = FocalNetModelTester(self )
631
"""TrOCR decoder model configuration.

Restored from a name-mangled source: the original had every ``__init__``
parameter named ``UpperCamelCase__`` (a SyntaxError) and both module-level
constants rebound to ``_A``. Parameter names are recovered from the body's
attribute-assignment order; defaults are unchanged.
"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    """Configuration for the TrOCR text decoder.

    Stores decoder hyper-parameters (vocabulary size, model width, layer /
    head counts, dropout rates, ...) and forwards the special-token ids to
    ``PretrainedConfig``.
    """

    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map the generic attribute names used across the library onto the
    # decoder-specific names this config actually stores.
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50_265,
        d_model=1_024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4_096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        # Special-token ids are handled by the base class.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
631
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import DebertaVaConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, TFDebertaVaModel, ) class _lowercase : def __init__( self: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[Any]=13 , UpperCamelCase__: Optional[int]=7 , UpperCamelCase__: Any=True , UpperCamelCase__: Any=True , UpperCamelCase__: Union[str, Any]=True , UpperCamelCase__: Union[str, Any]=True , UpperCamelCase__: int=99 , UpperCamelCase__: List[Any]=32 , UpperCamelCase__: Tuple=2 , UpperCamelCase__: Tuple=4 , UpperCamelCase__: Optional[Any]=37 , UpperCamelCase__: str="gelu" , UpperCamelCase__: Optional[int]=0.1 , UpperCamelCase__: List[Any]=0.1 , UpperCamelCase__: List[Any]=512 , UpperCamelCase__: str=16 , UpperCamelCase__: Dict=2 , UpperCamelCase__: List[Any]=0.02 , UpperCamelCase__: Optional[Any]=False , UpperCamelCase__: Optional[int]=True , UpperCamelCase__: List[str]="None" , UpperCamelCase__: List[str]=3 , UpperCamelCase__: Dict=4 , UpperCamelCase__: List[Any]=None , ): lowerCamelCase__ : str = parent lowerCamelCase__ : Any = batch_size lowerCamelCase__ : Optional[Any] = seq_length lowerCamelCase__ : Optional[Any] = is_training lowerCamelCase__ : Any = use_input_mask lowerCamelCase__ : int = use_token_type_ids lowerCamelCase__ : str = use_labels lowerCamelCase__ : Any = vocab_size lowerCamelCase__ : Union[str, Any] = hidden_size lowerCamelCase__ : Optional[int] = num_hidden_layers lowerCamelCase__ : Dict = num_attention_heads lowerCamelCase__ : List[Any] = 
intermediate_size lowerCamelCase__ : str = hidden_act lowerCamelCase__ : Optional[int] = hidden_dropout_prob lowerCamelCase__ : Optional[Any] = attention_probs_dropout_prob lowerCamelCase__ : List[str] = max_position_embeddings lowerCamelCase__ : Dict = type_vocab_size lowerCamelCase__ : Dict = type_sequence_label_size lowerCamelCase__ : List[str] = initializer_range lowerCamelCase__ : Dict = num_labels lowerCamelCase__ : str = num_choices lowerCamelCase__ : List[Any] = relative_attention lowerCamelCase__ : Tuple = position_biased_input lowerCamelCase__ : Union[str, Any] = pos_att_type lowerCamelCase__ : Optional[Any] = scope def lowerCamelCase_ ( self: Dict ): lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ : int = None if self.use_input_mask: lowerCamelCase__ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase__ : Optional[Any] = None if self.use_token_type_ids: lowerCamelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase__ : List[str] = None lowerCamelCase__ : Tuple = None lowerCamelCase__ : Optional[Any] = None if self.use_labels: lowerCamelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase__ : Optional[int] = DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , 
initializer_range=self.initializer_range , return_dict=UpperCamelCase__ , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase_ ( self: Tuple , UpperCamelCase__: int , UpperCamelCase__: Any , UpperCamelCase__: str , UpperCamelCase__: Optional[int] , UpperCamelCase__: Any , UpperCamelCase__: Dict , UpperCamelCase__: str ): lowerCamelCase__ : List[Any] = TFDebertaVaModel(config=UpperCamelCase__ ) lowerCamelCase__ : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} lowerCamelCase__ : List[str] = [input_ids, input_mask] lowerCamelCase__ : Any = model(UpperCamelCase__ ) lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: Dict , UpperCamelCase__: Optional[Any] , UpperCamelCase__: str , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: int , UpperCamelCase__: Optional[int] ): lowerCamelCase__ : Dict = TFDebertaVaForMaskedLM(config=UpperCamelCase__ ) lowerCamelCase__ : Optional[Any] = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } lowerCamelCase__ : Dict = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: int , UpperCamelCase__: List[Any] , UpperCamelCase__: str , UpperCamelCase__: Optional[int] , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: Any ): lowerCamelCase__ : Optional[int] = self.num_labels lowerCamelCase__ : int = TFDebertaVaForSequenceClassification(config=UpperCamelCase__ ) lowerCamelCase__ : Any = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } 
lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple , UpperCamelCase__: str ): lowerCamelCase__ : str = self.num_labels lowerCamelCase__ : Optional[Any] = TFDebertaVaForTokenClassification(config=UpperCamelCase__ ) lowerCamelCase__ : Dict = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } lowerCamelCase__ : List[Any] = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Any , UpperCamelCase__: str , UpperCamelCase__: int , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[str] ): lowerCamelCase__ : Tuple = TFDebertaVaForQuestionAnswering(config=UpperCamelCase__ ) lowerCamelCase__ : Optional[int] = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } lowerCamelCase__ : List[Any] = model(UpperCamelCase__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase_ ( self: List[str] ): lowerCamelCase__ : Dict = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) : List[Any] = config_and_inputs lowerCamelCase__ : Dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class 
_lowercase ( _lowercase , _lowercase , unittest.TestCase ): a = ( ( TFDebertaVaModel, TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, ) if is_tf_available() else () ) a = ( { """feature-extraction""": TFDebertaVaModel, """fill-mask""": TFDebertaVaForMaskedLM, """question-answering""": TFDebertaVaForQuestionAnswering, """text-classification""": TFDebertaVaForSequenceClassification, """token-classification""": TFDebertaVaForTokenClassification, """zero-shot""": TFDebertaVaForSequenceClassification, } if is_tf_available() else {} ) a = False a = False def lowerCamelCase_ ( self: int ): lowerCamelCase__ : int = TFDebertaVaModelTester(self ) lowerCamelCase__ : Tuple = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 ) def lowerCamelCase_ ( self: List[Any] ): self.config_tester.run_common_tests() def lowerCamelCase_ ( self: Optional[Any] ): lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Optional[Any] ): lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ ) def lowerCamelCase_ ( self: List[Any] ): lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Dict ): lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ ) @slow def lowerCamelCase_ ( self: List[Any] ): lowerCamelCase__ : Any = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" 
) self.assertIsNotNone(UpperCamelCase__ ) @require_tf class _lowercase ( unittest.TestCase ): @unittest.skip(reason="""Model not available yet""" ) def lowerCamelCase_ ( self: List[Any] ): pass @slow def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : Union[str, Any] = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" ) lowerCamelCase__ : Dict = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] ) lowerCamelCase__ : List[str] = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )[0] lowerCamelCase__ : str = tf.constant( [[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] ) tf.debugging.assert_near(output[:, 1:4, 1:4] , UpperCamelCase__ , atol=1e-4 )
631
'''simple docstring''' # A Bipartite Graph is a graph whose vertices can be divided into two independent sets, # U and V such that every edge (u, v) either connects a vertex from U to V or a vertex # from V to U. In other words, for every edge (u, v), either u belongs to U and v to V, # or u belongs to V and v to U. We can also say that there is no edge that connects # vertices of same set. def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Optional[Any]: lowerCamelCase__ : str = [False] * len(UpperCamelCase ) lowerCamelCase__ : str = [-1] * len(UpperCamelCase ) def dfs(UpperCamelCase , UpperCamelCase ): lowerCamelCase__ : Optional[int] = True lowerCamelCase__ : Union[str, Any] = c for u in graph[v]: if not visited[u]: dfs(UpperCamelCase , 1 - c ) for i in range(len(UpperCamelCase ) ): if not visited[i]: dfs(UpperCamelCase , 0 ) for i in range(len(UpperCamelCase ) ): for j in graph[i]: if color[i] == color[j]: return False return True # Adjacency list of graph _A : int ={0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []} print(check_bipartite_dfs(graph))
631
1
'''simple docstring''' _A : List[str] ={ '''a''': '''AAAAA''', '''b''': '''AAAAB''', '''c''': '''AAABA''', '''d''': '''AAABB''', '''e''': '''AABAA''', '''f''': '''AABAB''', '''g''': '''AABBA''', '''h''': '''AABBB''', '''i''': '''ABAAA''', '''j''': '''BBBAA''', '''k''': '''ABAAB''', '''l''': '''ABABA''', '''m''': '''ABABB''', '''n''': '''ABBAA''', '''o''': '''ABBAB''', '''p''': '''ABBBA''', '''q''': '''ABBBB''', '''r''': '''BAAAA''', '''s''': '''BAAAB''', '''t''': '''BAABA''', '''u''': '''BAABB''', '''v''': '''BBBAB''', '''w''': '''BABAA''', '''x''': '''BABAB''', '''y''': '''BABBA''', '''z''': '''BABBB''', ''' ''': ''' ''', } _A : Optional[Any] ={value: key for key, value in encode_dict.items()} def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> str: lowerCamelCase__ : Union[str, Any] = """""" for letter in word.lower(): if letter.isalpha() or letter == " ": encoded += encode_dict[letter] else: raise Exception("""encode() accepts only letters of the alphabet and spaces""" ) return encoded def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> str: if set(UpperCamelCase ) - {"A", "B", " "} != set(): raise Exception("""decode() accepts only 'A', 'B' and spaces""" ) lowerCamelCase__ : str = """""" for word in coded.split(): while len(UpperCamelCase ) != 0: decoded += decode_dict[word[:5]] lowerCamelCase__ : List[str] = word[5:] decoded += " " return decoded.strip() if __name__ == "__main__": from doctest import testmod testmod()
631
"""Dataset/iterator utilities used by the PyTorch pipelines.

Restored from a name-mangled source: every class was named ``_lowercase``
(the first one inheriting the then-undefined ``_lowercase`` — a NameError),
the iterator ``__next__`` methods had been renamed to ordinary methods
(breaking the iterator protocol), and the dict-building assignments in
``loader_batch_item`` had been flattened to throwaway locals. Canonical
names and dict assignments are restored; control flow is unchanged.
"""

import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset

from ..utils.generic import ModelOutput


class PipelineDataset(Dataset):
    """Map-style dataset applying a preprocess function to each raw item."""

    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed


class PipelineIterator(IterableDataset):
    """Applies `infer` to every item of `loader`, optionally unrolling batches
    so downstream consumers always see batch_size=1 items."""

    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping for the batch currently being unrolled.
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        """Return the current item of the unrolled batch, shaped as batch_size=1."""
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is a simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
                    # for compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
                    # for compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # like batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within that batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed


class PipelineChunkIterator(PipelineIterator):
    """Flattens an iterator-of-iterators produced by `infer` into a flat stream."""

    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # Subiterator None means we haven't started unrolling the first item.
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed


class PipelinePackIterator(PipelineIterator):
    """Regroups flattened items back into lists, split on the `is_last` marker."""

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.

        # This iterator accumulates items (possibly while unbatching) until it
        # hits `is_last` and then just passes the accumulated list to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                # Try to infer the size of the batch
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator


class KeyDataset(Dataset):
    """Projects each dict item of `dataset` onto a single key."""

    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    """Projects each dict item of `dataset` onto a `{"text", "text_pair"}` pair."""

    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
631
1
"""Build pjit PartitionSpecs for GPT-style Flax model parameters.

Fixes over the mangled original: duplicate parameter names (SyntaxError),
helper definitions renamed to match their call sites (`_match`,
`_replacement_rules`, `_get_partition_rules`), and the two sentinels given
the distinct names the code below actually reads.
"""
import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P

# Sentinel marking parameters that matched no partition rule.
_unmatched = object()
# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if the regexes in `qs` match any contiguous window of `ks`.

    `qs` is a sequence of regex strings, `ks` a tuple of key strings
    (one flattened parameter path).
    """
    # Anchor each regex so it must match a whole key component.
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    """Return a `replace(key, val)` function applying the first matching rule."""

    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    """Partition rules for a GPT-style transformer; 'mp' is the model-parallel axis."""
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # atention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def SCREAMING_SNAKE_CASE_(in_dict):
    """Map every parameter in `in_dict` to a PartitionSpec (or None).

    Raises AssertionError if any parameter matches no rule.
    """
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    # Seed every flattened key with the sentinel; `replace` overwrites matches.
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
631
'''simple docstring''' # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import platform import sys _A : Dict ='''3''' print('''Python version:''', sys.version) print('''OS platform:''', platform.platform()) print('''OS architecture:''', platform.machine()) try: import torch print('''Torch version:''', torch.__version__) print('''Cuda available:''', torch.cuda.is_available()) print('''Cuda version:''', torch.version.cuda) print('''CuDNN version:''', torch.backends.cudnn.version()) print('''Number of GPUs available:''', torch.cuda.device_count()) except ImportError: print('''Torch version:''', None) try: import transformers print('''transformers version:''', transformers.__version__) except ImportError: print('''transformers version:''', None)
631
1
'''simple docstring''' import os from collections import deque import torch from torch.utils.data import Dataset class _lowercase ( _lowercase ): def __init__( self: List[Any] , UpperCamelCase__: Optional[int]="" , UpperCamelCase__: Tuple="train" ): assert os.path.isdir(UpperCamelCase__ ) lowerCamelCase__ : List[Any] = [] lowerCamelCase__ : Optional[Any] = os.listdir(UpperCamelCase__ ) for story_filename in story_filenames_list: if "summary" in story_filename: continue lowerCamelCase__ : List[str] = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) if not os.path.isfile(UpperCamelCase__ ): continue self.documents.append(UpperCamelCase__ ) def __len__( self: int ): return len(self.documents ) def __getitem__( self: Dict , UpperCamelCase__: List[str] ): lowerCamelCase__ : Optional[int] = self.documents[idx] lowerCamelCase__ : Optional[int] = document_path.split("""/""" )[-1] with open(UpperCamelCase__ , encoding="""utf-8""" ) as source: lowerCamelCase__ : List[str] = source.read() lowerCamelCase__ , lowerCamelCase__ : List[str] = process_story(UpperCamelCase__ ) return document_name, story_lines, summary_lines def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> str: lowerCamelCase__ : List[str] = list(filter(lambda UpperCamelCase : len(UpperCamelCase ) != 0 , [line.strip() for line in raw_story.split("""\n""" )] ) ) # for some unknown reason some lines miss a period, add it lowerCamelCase__ : Union[str, Any] = [_add_missing_period(UpperCamelCase ) for line in nonempty_lines] # gather article lines lowerCamelCase__ : Dict = [] lowerCamelCase__ : List[str] = deque(UpperCamelCase ) while True: try: lowerCamelCase__ : str = lines.popleft() if element.startswith("""@highlight""" ): break story_lines.append(UpperCamelCase ) except IndexError: # if "@highlight" is absent from the file we pop # all elements until there is None, raising an exception. 
return story_lines, [] # gather summary lines lowerCamelCase__ : Optional[Any] = list(filter(lambda UpperCamelCase : not t.startswith("""@highlight""" ) , UpperCamelCase ) ) return story_lines, summary_lines def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Tuple: lowerCamelCase__ : Optional[Any] = [""".""", """!""", """?""", """...""", """'""", """`""", """\"""", """\u2019""", """\u2019""", """)"""] if line.startswith("""@highlight""" ): return line if line[-1] in END_TOKENS: return line return line + "." def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int: if len(UpperCamelCase ) > block_size: return sequence[:block_size] else: sequence.extend([pad_token_id] * (block_size - len(UpperCamelCase )) ) return sequence def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Dict: lowerCamelCase__ : Dict = torch.ones_like(UpperCamelCase ) lowerCamelCase__ : Any = sequence == pad_token_id lowerCamelCase__ : List[str] = 0 return mask def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str: lowerCamelCase__ : str = [tokenizer.encode(UpperCamelCase ) for line in story_lines] lowerCamelCase__ : Dict = [token for sentence in story_lines_token_ids for token in sentence] lowerCamelCase__ : Optional[int] = [tokenizer.encode(UpperCamelCase ) for line in summary_lines] lowerCamelCase__ : str = [token for sentence in summary_lines_token_ids for token in sentence] return story_token_ids, summary_token_ids def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Optional[Any]: lowerCamelCase__ : Optional[int] = [] for sequence in batch: lowerCamelCase__ : Optional[Any] = -1 lowerCamelCase__ : Optional[int] = [] for s in sequence: if s == separator_token_id: sentence_num += 1 embeddings.append(sentence_num % 2 ) batch_embeddings.append(UpperCamelCase ) return torch.tensor(UpperCamelCase )
631
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) _A : Any ={ '''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''], '''processing_trocr''': ['''TrOCRProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : str =[ '''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TrOCRForCausalLM''', '''TrOCRPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys _A : Dict =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
631
1
'''simple docstring'''
# Obfuscated copy of diffusers' SpectrogramDiffusionPipeline: note tokens ->
# spectrogram frames (diffusion decoder) -> audio (MelGAN vocoder).
#
# NOTE(review): the obfuscation replaced every assignment target with
# `lowerCamelCase__` and most argument names with `UpperCamelCase__`; names read
# later (`features`, `zero_one`, `timesteps`, `mel`, ...) are therefore never
# bound in this copy. Comments below describe the apparent upstream intent —
# verify against diffusers before executing.
import math
from typing import Any, Callable, List, Optional, Tuple, Union

import numpy as np
import torch

from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor

if is_onnx_available():
    from ..onnx_utils import OnnxRuntimeModel

from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder


_A : Dict =logging.get_logger(__name__)  # pylint: disable=invalid-name

# Number of spectrogram frames generated per segment (second `_A` shadows the logger).
_A : Dict =256


class _lowercase ( _lowercase ):
    # Components that may legitimately be None (optional vocoder).
    a = ["""melgan"""]

    def __init__( self: int , UpperCamelCase__: SpectrogramNotesEncoder , UpperCamelCase__: SpectrogramContEncoder , UpperCamelCase__: TaFilmDecoder , UpperCamelCase__: DDPMScheduler , UpperCamelCase__: OnnxRuntimeModel if is_onnx_available() else Any , ):
        super().__init__()

        # From MELGAN
        # min/max clip range for log-mel features (targets: min_value, max_value, n_dims).
        lowerCamelCase__ : str = math.log(1e-5 )  # Matches MelGAN training.
        lowerCamelCase__ : List[str] = 4.0  # Largest value for most examples
        lowerCamelCase__ : Tuple = 128

        self.register_modules(
            notes_encoder=UpperCamelCase__ , continuous_encoder=UpperCamelCase__ , decoder=UpperCamelCase__ , scheduler=UpperCamelCase__ , melgan=UpperCamelCase__ , )

    def lowerCamelCase_ ( self: str , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Optional[int]=(-1.0, 1.0) , UpperCamelCase__: Tuple=False ):
        # scale_features: map features from [min_value, max_value] to `output_range`.
        lowerCamelCase__ , lowerCamelCase__ : Any = output_range
        if clip:
            lowerCamelCase__ : Union[str, Any] = torch.clip(UpperCamelCase__ , self.min_value , self.max_value )
        # Scale to [0, 1].
        lowerCamelCase__ : int = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def lowerCamelCase_ ( self: str , UpperCamelCase__: List[Any] , UpperCamelCase__: int=(-1.0, 1.0) , UpperCamelCase__: List[str]=False ):
        # scale_to_features: inverse of the above — map from `input_range`
        # back to [min_value, max_value].
        lowerCamelCase__ , lowerCamelCase__ : str = input_range
        lowerCamelCase__ : Dict = torch.clip(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) if clip else outputs
        # Scale to [0, 1].
        lowerCamelCase__ : List[Any] = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: List[Any] , UpperCamelCase__: Dict , UpperCamelCase__: Union[str, Any] ):
        # encode: run note-token and continuous (previous-segment) encoders;
        # tokens <= 0 are treated as padding for the mask.
        lowerCamelCase__ : Any = input_tokens > 0
        lowerCamelCase__ , lowerCamelCase__ : List[str] = self.notes_encoder(
            encoder_input_tokens=UpperCamelCase__ , encoder_inputs_mask=UpperCamelCase__ )

        lowerCamelCase__ , lowerCamelCase__ : Any = self.continuous_encoder(
            encoder_inputs=UpperCamelCase__ , encoder_inputs_mask=UpperCamelCase__ )

        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: Optional[int] , UpperCamelCase__: int , UpperCamelCase__: Tuple ):
        # decode: one denoising step — normalize `noise_time` to a per-batch
        # timestep tensor, then run the film decoder.
        lowerCamelCase__ : Optional[Any] = noise_time
        if not torch.is_tensor(UpperCamelCase__ ):
            lowerCamelCase__ : Optional[Any] = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
        elif torch.is_tensor(UpperCamelCase__ ) and len(timesteps.shape ) == 0:
            lowerCamelCase__ : Tuple = timesteps[None].to(input_tokens.device )

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        lowerCamelCase__ : List[str] = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )

        lowerCamelCase__ : Tuple = self.decoder(
            encodings_and_masks=UpperCamelCase__ , decoder_input_tokens=UpperCamelCase__ , decoder_noise_time=UpperCamelCase__ )
        return logits

    @torch.no_grad()
    def __call__( self: List[str] , UpperCamelCase__: List[List[int]] , UpperCamelCase__: Optional[torch.Generator] = None , UpperCamelCase__: int = 100 , UpperCamelCase__: bool = True , UpperCamelCase__: str = "numpy" , UpperCamelCase__: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase__: int = 1 , ):
        # Validate `callback_steps` before doing any work.
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or callback_steps <= 0)
        ):
            raise ValueError(
                F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                F''' {type(UpperCamelCase__ )}.''' )

        # Rolling buffers: previous-segment mel context and the full output so far.
        lowerCamelCase__ : str = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
        lowerCamelCase__ : Any = np.zeros([1, 0, self.n_dims] , np.floataa )
        lowerCamelCase__ : List[Any] = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=UpperCamelCase__ , device=self.device )

        # Generate one TARGET_FEATURE_LENGTH-frame segment per input token list,
        # conditioning each segment on the previous segment's prediction.
        for i, encoder_input_tokens in enumerate(UpperCamelCase__ ):
            if i == 0:
                lowerCamelCase__ : Optional[Any] = torch.from_numpy(pred_mel[:1].copy() ).to(
                    device=self.device , dtype=self.decoder.dtype )
                # The first chunk has no previous context.
                lowerCamelCase__ : List[Any] = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=UpperCamelCase__ , device=self.device )
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                lowerCamelCase__ : Optional[int] = ones

            lowerCamelCase__ : Optional[int] = self.scale_features(
                UpperCamelCase__ , output_range=[-1.0, 1.0] , clip=UpperCamelCase__ )
            lowerCamelCase__ : Optional[Any] = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=UpperCamelCase__ , continuous_mask=UpperCamelCase__ , )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            lowerCamelCase__ : List[str] = randn_tensor(
                shape=encoder_continuous_inputs.shape , generator=UpperCamelCase__ , device=self.device , dtype=self.decoder.dtype , )

            # set step values
            self.scheduler.set_timesteps(UpperCamelCase__ )

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
                lowerCamelCase__ : str = self.decode(
                    encodings_and_masks=UpperCamelCase__ , input_tokens=UpperCamelCase__ , noise_time=t / self.scheduler.config.num_train_timesteps , )

                # Compute previous output: x_t -> x_t-1
                lowerCamelCase__ : Tuple = self.scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ ).prev_sample

            lowerCamelCase__ : Tuple = self.scale_to_features(UpperCamelCase__ , input_range=[-1.0, 1.0] )
            lowerCamelCase__ : Optional[Any] = mel[:1]
            lowerCamelCase__ : int = mel.cpu().float().numpy()

            lowerCamelCase__ : Optional[Any] = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(UpperCamelCase__ , UpperCamelCase__ )

            logger.info("""Generated segment""" , UpperCamelCase__ )

        # Output validation: the numpy path requires both ONNX and the MelGAN vocoder.
        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                """Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.""" )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                """Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.""" )

        if output_type == "numpy":
            # Vocode the accumulated mel spectrogram into a waveform.
            lowerCamelCase__ : Union[str, Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
        else:
            lowerCamelCase__ : str = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=UpperCamelCase__ )
631
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _A : Union[str, Any] =logging.get_logger(__name__) _A : List[str] ={ '''MIT/ast-finetuned-audioset-10-10-0.4593''': ( '''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json''' ), } class _lowercase ( _lowercase ): a = """audio-spectrogram-transformer""" def __init__( self: str , UpperCamelCase__: Any=768 , UpperCamelCase__: Union[str, Any]=12 , UpperCamelCase__: List[Any]=12 , UpperCamelCase__: int=3_072 , UpperCamelCase__: Optional[Any]="gelu" , UpperCamelCase__: Optional[int]=0.0 , UpperCamelCase__: Tuple=0.0 , UpperCamelCase__: Union[str, Any]=0.02 , UpperCamelCase__: Dict=1e-12 , UpperCamelCase__: List[str]=16 , UpperCamelCase__: List[str]=True , UpperCamelCase__: Any=10 , UpperCamelCase__: List[str]=10 , UpperCamelCase__: Any=1_024 , UpperCamelCase__: Optional[Any]=128 , **UpperCamelCase__: Union[str, Any] , ): super().__init__(**UpperCamelCase__ ) lowerCamelCase__ : List[Any] = hidden_size lowerCamelCase__ : int = num_hidden_layers lowerCamelCase__ : List[str] = num_attention_heads lowerCamelCase__ : Optional[int] = intermediate_size lowerCamelCase__ : List[Any] = hidden_act lowerCamelCase__ : List[Any] = hidden_dropout_prob lowerCamelCase__ : str = attention_probs_dropout_prob lowerCamelCase__ : Dict = initializer_range lowerCamelCase__ : List[str] = layer_norm_eps lowerCamelCase__ : List[Any] = patch_size lowerCamelCase__ : List[str] = qkv_bias lowerCamelCase__ : Dict = frequency_stride lowerCamelCase__ : List[Any] = time_stride lowerCamelCase__ : str = max_length lowerCamelCase__ : Dict = num_mel_bins
631
1
'''simple docstring'''
# Obfuscated copy of diffusers' StableDiffusionPanorama pipeline tests: a fast
# CPU class built on tiny dummy components, and a slow GPU class against the
# real stabilityai/stable-diffusion-2-base checkpoint.
#
# NOTE(review): the obfuscation collapsed every class attribute to `a`, every
# method name to `lowerCamelCase_` (later defs shadow earlier ones), and every
# local target to `lowerCamelCase__`; names read later (`unet`, `scheduler`,
# `components`, `image_slice`, ...) are never bound in this copy. Comments
# describe apparent upstream intent — verify against diffusers before running.
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPanoramaPipeline,
    UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


@skip_mps
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
    # pipeline class and parameter sets under test (all collapsed to `a` by obfuscation).
    a = StableDiffusionPanoramaPipeline
    a = TEXT_TO_IMAGE_PARAMS
    a = TEXT_TO_IMAGE_BATCH_PARAMS
    a = TEXT_TO_IMAGE_IMAGE_PARAMS
    a = TEXT_TO_IMAGE_IMAGE_PARAMS

    def lowerCamelCase_ ( self: Optional[Any] ):
        # get_dummy_components: tiny unet/vae/text-encoder, seeded for determinism.
        torch.manual_seed(0 )
        lowerCamelCase__ : Any = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        lowerCamelCase__ : Dict = DDIMScheduler()
        torch.manual_seed(0 )
        lowerCamelCase__ : int = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        torch.manual_seed(0 )
        lowerCamelCase__ : Any = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        lowerCamelCase__ : Any = CLIPTextModel(UpperCamelCase__ )
        lowerCamelCase__ : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        lowerCamelCase__ : Tuple = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components

    def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any]=0 ):
        # get_dummy_inputs: one-step generation request with a seeded generator.
        lowerCamelCase__ : List[str] = torch.manual_seed(UpperCamelCase__ )
        lowerCamelCase__ : str = {
            """prompt""": """a photo of the dolomites""",
            """generator""": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            """height""": None,
            """width""": None,
            """num_inference_steps""": 1,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs

    def lowerCamelCase_ ( self: List[str] ):
        # Default case: compare the output corner slice against pinned values.
        lowerCamelCase__ : int = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        lowerCamelCase__ : str = self.get_dummy_components()
        lowerCamelCase__ : str = StableDiffusionPanoramaPipeline(**UpperCamelCase__ )
        lowerCamelCase__ : int = sd_pipe.to(UpperCamelCase__ )
        sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        lowerCamelCase__ : Dict = self.get_dummy_inputs(UpperCamelCase__ )
        lowerCamelCase__ : Dict = sd_pipe(**UpperCamelCase__ ).images
        lowerCamelCase__ : str = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        lowerCamelCase__ : Tuple = np.array([0.6_186, 0.5_374, 0.4_915, 0.4_135, 0.4_114, 0.4_563, 0.5_128, 0.4_977, 0.4_757] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def lowerCamelCase_ ( self: Any ):
        # Delegate batch-consistency check to the mixin.
        super().test_inference_batch_consistent(batch_sizes=[1, 2] )

    def lowerCamelCase_ ( self: Union[str, Any] ):
        # Delegate single/batch identity check to the mixin.
        super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25e-3 )

    def lowerCamelCase_ ( self: Tuple ):
        # Negative-prompt path should still reproduce the pinned slice.
        lowerCamelCase__ : Union[str, Any] = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        lowerCamelCase__ : Optional[Any] = self.get_dummy_components()
        lowerCamelCase__ : Union[str, Any] = StableDiffusionPanoramaPipeline(**UpperCamelCase__ )
        lowerCamelCase__ : str = sd_pipe.to(UpperCamelCase__ )
        sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = self.get_dummy_inputs(UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = """french fries"""
        lowerCamelCase__ : Any = sd_pipe(**UpperCamelCase__ , negative_prompt=UpperCamelCase__ )
        lowerCamelCase__ : Any = output.images
        lowerCamelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        lowerCamelCase__ : Optional[Any] = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def lowerCamelCase_ ( self: Dict ):
        # view_batch_size=2 must not change the result.
        lowerCamelCase__ : Union[str, Any] = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        lowerCamelCase__ : List[Any] = self.get_dummy_components()
        lowerCamelCase__ : Union[str, Any] = StableDiffusionPanoramaPipeline(**UpperCamelCase__ )
        lowerCamelCase__ : Optional[int] = sd_pipe.to(UpperCamelCase__ )
        sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        lowerCamelCase__ : Tuple = self.get_dummy_inputs(UpperCamelCase__ )
        lowerCamelCase__ : List[str] = sd_pipe(**UpperCamelCase__ , view_batch_size=2 )
        lowerCamelCase__ : Optional[Any] = output.images
        lowerCamelCase__ : str = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        lowerCamelCase__ : Any = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def lowerCamelCase_ ( self: List[str] ):
        # Euler-ancestral scheduler variant with its own expected slice.
        lowerCamelCase__ : Tuple = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        lowerCamelCase__ : Dict = self.get_dummy_components()
        lowerCamelCase__ : Union[str, Any] = EulerAncestralDiscreteScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
        lowerCamelCase__ : Dict = StableDiffusionPanoramaPipeline(**UpperCamelCase__ )
        lowerCamelCase__ : Any = sd_pipe.to(UpperCamelCase__ )
        sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = self.get_dummy_inputs(UpperCamelCase__ )
        lowerCamelCase__ : Optional[int] = sd_pipe(**UpperCamelCase__ ).images
        lowerCamelCase__ : Optional[int] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        lowerCamelCase__ : Dict = np.array([0.4_024, 0.6_510, 0.4_901, 0.5_378, 0.5_813, 0.5_622, 0.4_795, 0.4_467, 0.4_952] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def lowerCamelCase_ ( self: Tuple ):
        # PNDM scheduler variant with its own expected slice.
        lowerCamelCase__ : List[Any] = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        lowerCamelCase__ : Dict = self.get_dummy_components()
        lowerCamelCase__ : Optional[Any] = PNDMScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , skip_prk_steps=UpperCamelCase__ )
        lowerCamelCase__ : List[str] = StableDiffusionPanoramaPipeline(**UpperCamelCase__ )
        lowerCamelCase__ : List[str] = sd_pipe.to(UpperCamelCase__ )
        sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = self.get_dummy_inputs(UpperCamelCase__ )
        lowerCamelCase__ : Optional[int] = sd_pipe(**UpperCamelCase__ ).images
        lowerCamelCase__ : Optional[int] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        lowerCamelCase__ : str = np.array([0.6_391, 0.6_291, 0.4_861, 0.5_134, 0.5_552, 0.4_578, 0.5_032, 0.5_023, 0.4_539] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2


@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
    def lowerCamelCase_ ( self: Union[str, Any] ):
        # tearDown: free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowerCamelCase_ ( self: Any , UpperCamelCase__: Any=0 ):
        # get_inputs: seeded 3-step generation request for the real checkpoint.
        lowerCamelCase__ : List[str] = torch.manual_seed(UpperCamelCase__ )
        lowerCamelCase__ : Dict = {
            """prompt""": """a photo of the dolomites""",
            """generator""": generator,
            """num_inference_steps""": 3,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs

    def lowerCamelCase_ ( self: List[str] ):
        # Full-checkpoint run with DDIM; pinned 512x2048 panorama slice.
        lowerCamelCase__ : Optional[Any] = """stabilityai/stable-diffusion-2-base"""
        lowerCamelCase__ : str = DDIMScheduler.from_pretrained(UpperCamelCase__ , subfolder="""scheduler""" )
        lowerCamelCase__ : List[Any] = StableDiffusionPanoramaPipeline.from_pretrained(UpperCamelCase__ , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ )
        pipe.to(UpperCamelCase__ )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        pipe.enable_attention_slicing()
        lowerCamelCase__ : Union[str, Any] = self.get_inputs()
        lowerCamelCase__ : List[Any] = pipe(**UpperCamelCase__ ).images
        lowerCamelCase__ : Dict = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2_048, 3)
        lowerCamelCase__ : List[Any] = np.array(
            [
                0.36_968_392,
                0.27_025_372,
                0.32_446_766,
                0.28_379_387,
                0.36_363_274,
                0.30_733_347,
                0.27_100_027,
                0.27_054_125,
                0.25_536_096,
            ] )
        assert np.abs(expected_slice - image_slice ).max() < 1e-2

    def lowerCamelCase_ ( self: List[str] ):
        # LMS scheduler variant (expected slice pinned to zeros upstream).
        lowerCamelCase__ : Any = StableDiffusionPanoramaPipeline.from_pretrained(
            """stabilityai/stable-diffusion-2-base""" , safety_checker=UpperCamelCase__ )
        lowerCamelCase__ : int = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.to(UpperCamelCase__ )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        pipe.enable_attention_slicing()
        lowerCamelCase__ : Union[str, Any] = self.get_inputs()
        lowerCamelCase__ : Optional[Any] = pipe(**UpperCamelCase__ ).images
        lowerCamelCase__ : Any = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2_048, 3)
        lowerCamelCase__ : str = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ] )
        assert np.abs(expected_slice - image_slice ).max() < 1e-3

    def lowerCamelCase_ ( self: Dict ):
        # Intermediate-state callback: latents at steps 1 and 2 match pinned values.
        lowerCamelCase__ : Union[str, Any] = 0

        def callback_fn(UpperCamelCase__: int , UpperCamelCase__: int , UpperCamelCase__: torch.FloatTensor ) -> None:
            lowerCamelCase__ : int = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                lowerCamelCase__ : Any = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                lowerCamelCase__ : str = latents[0, -3:, -3:, -1]
                lowerCamelCase__ : Union[str, Any] = np.array(
                    [
                        0.18_681_869,
                        0.33_907_816,
                        0.5_361_276,
                        0.14_432_865,
                        -0.02_856_611,
                        -0.73_941_123,
                        0.23_397_987,
                        0.47_322_682,
                        -0.37_823_164,
                    ] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
            elif step == 2:
                lowerCamelCase__ : List[Any] = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                lowerCamelCase__ : Tuple = latents[0, -3:, -3:, -1]
                lowerCamelCase__ : int = np.array(
                    [
                        0.18_539_645,
                        0.33_987_248,
                        0.5_378_559,
                        0.14_437_142,
                        -0.02_455_261,
                        -0.7_338_317,
                        0.23_990_755,
                        0.47_356_272,
                        -0.3_786_505,
                    ] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2

        lowerCamelCase__ : Optional[Any] = False
        lowerCamelCase__ : Any = """stabilityai/stable-diffusion-2-base"""
        lowerCamelCase__ : List[Any] = DDIMScheduler.from_pretrained(UpperCamelCase__ , subfolder="""scheduler""" )
        lowerCamelCase__ : Any = StableDiffusionPanoramaPipeline.from_pretrained(UpperCamelCase__ , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = pipe.to(UpperCamelCase__ )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        pipe.enable_attention_slicing()
        lowerCamelCase__ : Any = self.get_inputs()
        pipe(**UpperCamelCase__ , callback=UpperCamelCase__ , callback_steps=1 )
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def lowerCamelCase_ ( self: Any ):
        # Memory-usage cap with attention slicing + sequential CPU offload.
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        lowerCamelCase__ : str = """stabilityai/stable-diffusion-2-base"""
        lowerCamelCase__ : Optional[Any] = DDIMScheduler.from_pretrained(UpperCamelCase__ , subfolder="""scheduler""" )
        lowerCamelCase__ : Any = StableDiffusionPanoramaPipeline.from_pretrained(UpperCamelCase__ , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ )
        lowerCamelCase__ : Dict = pipe.to(UpperCamelCase__ )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        lowerCamelCase__ : int = self.get_inputs()
        lowerCamelCase__ : Optional[Any] = pipe(**UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = torch.cuda.max_memory_allocated()
        # make sure that less than 5.2 GB is allocated
        assert mem_bytes < 5.5 * 10**9
631
"""Release utility: bump the version string everywhere it is pinned in the repo.

`pre_release_work` prepares a release (or patch release); `post_release_work`
moves the main branch back to a `.dev0` development version.
"""
import argparse
import os
import re

import packaging.version


# Folder whose example scripts pin a minimum library version.
PATH_TO_EXAMPLES = "examples/"
# pattern name -> (regex locating the version string, replacement template with a VERSION placeholder)
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
# pattern name -> file whose version string must be rewritten for a release.
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Rewrite the version pinned in `fname` to `version`.

    `pattern` is a key of REPLACE_PATTERNS selecting which regex/template pair
    locates the version string inside the file.
    """
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the `check_min_version` pin in every maintained example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk.
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version everywhere it is hard-coded (skipping examples for a patch)."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace links to the `main` docs with stable-docs links in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version from the library's `__init__.py`."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Bump the version for a release, prompting the user to confirm the number."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """After a release, move the main branch to the next `.dev0` version."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
631
1
"""Feature extractor for CLAP: turns raw mono audio into (optionally fused) log-mel spectrograms."""
import copy
from typing import Any, Dict, List, Optional, Union

import numpy as np
import torch

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class ClapFeatureExtractor(SequenceFeatureExtractor):
    """Extracts log-mel spectrogram features for CLAP.

    Long inputs are either randomly cropped (``truncation="rand_trunc"``) or
    summarized as a 4-channel "fusion" of three random chunks plus a shrunk
    global view (``truncation="fusion"``). Short inputs are repeated and/or
    zero-padded up to ``max_length`` (``padding`` in {"repeat", "repeatpad"}).
    """

    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48_000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1_024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14_000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        # Number of bins of a one-sided spectrum for this FFT size.
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        # HTK-scaled filter bank, used on the "fusion" path.
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="htk",
        )
        # Slaney-scaled filter bank, used on the non-fusion path.
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the config, dropping the (large, derived) mel filter banks."""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        """Compute a (frames, mel) dB log-mel spectrogram of `waveform`."""
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T

    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        """Build the 4-channel fusion: 3 random chunks + a bilinear global shrink."""
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion

    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        """Truncate/pad one waveform and return (mel features, is_longer flag)."""
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than
                    # max_length + hop_length. In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying
            # the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one waveform or a batch of waveforms into a `BatchFeature`
        with keys ``input_features`` and ``is_longer``.
        """
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
631
"""Determinism test: one epoch of training with DDPM vs. DDIM schedulers must match."""
import unittest

import torch

from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow


# Disable TF32 matmuls so results are bit-reproducible across runs.
torch.backends.cuda.matmul.allow_tf32 = False


class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        """Build a fresh, seed-reset UNet and a plain SGD optimizer for it."""
        set_seed(0)
        model = UNetaDModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # Both schedulers must have produced identical noised inputs and predictions
        # on the last step (distinct names so the DDPM results survive the DDIM pass).
        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
631
1
'''simple docstring''' _A : List[str] ='''0.21.0''' from .accelerator import Accelerator from .big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from .data_loader import skip_first_batches from .launchers import debug_launcher, notebook_launcher from .state import PartialState from .utils import ( DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states, ) if is_rich_available(): from .utils import rich
631
"""Highest Response Ratio Next (HRRN) CPU scheduling.

HRRN is a non-preemptive discipline that, among the processes that have
arrived, runs the one with the highest response ratio
(waiting_time + burst_time) / burst_time, which keeps long jobs from starving.
"""
from statistics import mean

import numpy as np


def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """Simulate HRRN scheduling and return the turnaround time of each process.

    Note: `arrival_time` is sorted in place, and the returned list is ordered
    by arrival time (matching the re-sorted `process_name`/`burst_time`).
    """
    current_time = 0
    # Number of processes finished so far.
    finished_process_count = 0
    # finished_process[i] is 1 once process i has run to completion, 0 before.
    finished_process = [0] * no_of_process
    # List to hold the per-process results.
    turn_around_time = [0] * no_of_process

    # Sort every per-process list by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # If the CPU is idle, jump to the arrival of the next unfinished process.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        # Pick the arrived, unfinished process with the highest response ratio.
        response_ratio = 0
        loc = 0  # index of the process chosen to run
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i

        # Run the chosen process to completion (HRRN is non-preemptive).
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        finished_process[loc] = 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    """Return the waiting time per process: turnaround time minus burst time."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )
    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
631
1
"""Fast (Rust-backed) tokenizer for FNet, built on a SentencePiece vocabulary."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"


class FNetTokenizerFast(PreTrainedTokenizerFast):
    """Fast FNet tokenizer. Inputs are formatted as `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # The slow tokenizer can only be re-created when the SentencePiece model is on disk.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add `[CLS]`/`[SEP]` around one sequence or a pair of sequences."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return segment ids: 0 for `[CLS] A [SEP]`, 1 for `B [SEP]` if present."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the SentencePiece model into `save_directory`; returns the written path."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        # Avoid copying the file onto itself when saving in place.
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
631
'''simple docstring''' from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
631
1
'''simple docstring''' import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> int: if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class _lowercase ( nn.Module ): def __init__( self: Any , UpperCamelCase__: nn.Module , UpperCamelCase__: int ): super().__init__() lowerCamelCase__ : List[Any] = module lowerCamelCase__ : Any = nn.Sequential( nn.Linear(module.in_features , UpperCamelCase__ , bias=UpperCamelCase__ ) , nn.Linear(UpperCamelCase__ , module.out_features , bias=UpperCamelCase__ ) , ) lowerCamelCase__ : Tuple = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=UpperCamelCase__ ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: Any , *UpperCamelCase__: Any , **UpperCamelCase__: Tuple ): return self.module(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ ) + self.adapter(UpperCamelCase__ ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class _lowercase ( unittest.TestCase ): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module a = """bigscience/bloom-1b7""" # Constant values a = 2.1_09_65_95_52_69_25_74 a = 
"""Hello my name is""" a = set() EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. I""" ) EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" ) EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" ) a = 10 def lowerCamelCase_ ( self: Any ): # Models and tokenizer lowerCamelCase__ : int = AutoTokenizer.from_pretrained(self.model_name ) class _lowercase ( _lowercase ): def lowerCamelCase_ ( self: Union[str, Any] ): super().setUp() # Models and tokenizer lowerCamelCase__ : Optional[Any] = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map="""auto""" ) lowerCamelCase__ : List[Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""" ) def lowerCamelCase_ ( self: Tuple ): del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def lowerCamelCase_ ( self: List[str] ): lowerCamelCase__ : Any = self.model_abit.config self.assertTrue(hasattr(UpperCamelCase__ , """quantization_config""" ) ) lowerCamelCase__ : List[str] = config.to_dict() lowerCamelCase__ : Optional[int] = config.to_diff_dict() lowerCamelCase__ : List[str] = config.to_json_string() def lowerCamelCase_ ( self: Dict ): from bitsandbytes.nn import Paramsabit lowerCamelCase__ : Optional[int] = self.model_fpaa.get_memory_footprint() lowerCamelCase__ : Dict = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) lowerCamelCase__ : Optional[int] = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def lowerCamelCase_ ( self: Tuple ): from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(UpperCamelCase__ , torch.nn.Linear ): if name not in ["lm_head"] + 
TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def lowerCamelCase_ ( self: Optional[int] ): lowerCamelCase__ : List[str] = self.tokenizer(self.input_text , return_tensors="""pt""" ) lowerCamelCase__ : Optional[int] = self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase__ ) , self.EXPECTED_OUTPUTS ) def lowerCamelCase_ ( self: List[Any] ): lowerCamelCase__ : Dict = BitsAndBytesConfig() lowerCamelCase__ : str = True lowerCamelCase__ : int = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=UpperCamelCase__ , device_map="""auto""" ) lowerCamelCase__ : Optional[Any] = self.tokenizer(self.input_text , return_tensors="""pt""" ) lowerCamelCase__ : Optional[int] = model_abit_from_config.generate( input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase__ ) , self.EXPECTED_OUTPUTS ) def lowerCamelCase_ ( self: Optional[int] ): with self.assertRaises(UpperCamelCase__ ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(UpperCamelCase__ ) def lowerCamelCase_ ( self: Optional[int] ): lowerCamelCase__ : Dict = BitsAndBytesConfig() with self.assertRaises(UpperCamelCase__ ): lowerCamelCase__ : str = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=UpperCamelCase__ , load_in_abit=UpperCamelCase__ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , ) def lowerCamelCase_ ( self: Optional[int] ): with self.assertRaises(UpperCamelCase__ ): # Tries with `str` self.model_abit.to("""cpu""" ) with self.assertRaises(UpperCamelCase__ ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(UpperCamelCase__ ): # Tries with a `device` 
self.model_abit.to(torch.device("""cuda:0""" ) ) with self.assertRaises(UpperCamelCase__ ): # Tries with a `device` self.model_abit.float() with self.assertRaises(UpperCamelCase__ ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything lowerCamelCase__ : Any = self.tokenizer(self.input_text , return_tensors="""pt""" ) lowerCamelCase__ : str = self.model_fpaa.to(torch.floataa ) lowerCamelCase__ : Union[str, Any] = self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error lowerCamelCase__ : str = self.model_fpaa.to("""cpu""" ) # Check this does not throw an error lowerCamelCase__ : Any = self.model_fpaa.half() # Check this does not throw an error lowerCamelCase__ : Optional[Any] = self.model_fpaa.float() def lowerCamelCase_ ( self: List[str] ): lowerCamelCase__ : str = AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=UpperCamelCase__ , device_map="""auto""" ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class _lowercase ( unittest.TestCase ): @classmethod def lowerCamelCase_ ( cls: List[str] ): lowerCamelCase__ : List[Any] = """t5-small""" lowerCamelCase__ : int = """google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense lowerCamelCase__ : List[str] = AutoTokenizer.from_pretrained(cls.model_name ) lowerCamelCase__ : Optional[Any] = """Translate in German: Hello, my dog is cute""" def lowerCamelCase_ ( self: Optional[Any] ): gc.collect() torch.cuda.empty_cache() def lowerCamelCase_ ( self: str ): from transformers import TaForConditionalGeneration lowerCamelCase__ : Union[str, Any] = TaForConditionalGeneration._keep_in_fpaa_modules lowerCamelCase__ : Dict = None # test with `t5-small` lowerCamelCase__ : Union[str, Any] = TaForConditionalGeneration.from_pretrained(self.model_name , 
load_in_abit=UpperCamelCase__ , device_map="""auto""" ) lowerCamelCase__ : Tuple = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) lowerCamelCase__ : List[Any] = model.generate(**UpperCamelCase__ ) # test with `flan-t5-small` lowerCamelCase__ : List[str] = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""" ) lowerCamelCase__ : Optional[Any] = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) lowerCamelCase__ : Dict = model.generate(**UpperCamelCase__ ) lowerCamelCase__ : Tuple = modules def lowerCamelCase_ ( self: int ): import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` lowerCamelCase__ : Any = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""" ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) lowerCamelCase__ : Any = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) lowerCamelCase__ : int = model.generate(**UpperCamelCase__ ) # test with `flan-t5-small` lowerCamelCase__ : Optional[Any] = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""" ) lowerCamelCase__ : Tuple = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) lowerCamelCase__ : List[str] = model.generate(**UpperCamelCase__ ) class _lowercase ( _lowercase ): def lowerCamelCase_ ( self: str ): super().setUp() # model_name lowerCamelCase__ : List[str] = """bigscience/bloom-560m""" lowerCamelCase__ : List[Any] = """t5-small""" # Different types of model lowerCamelCase__ : str = AutoModel.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""" ) # Sequence classification model lowerCamelCase__ : int = 
AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""" ) # CausalLM model lowerCamelCase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""" ) # Seq2seq model lowerCamelCase__ : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=UpperCamelCase__ , device_map="""auto""" ) def lowerCamelCase_ ( self: Optional[int] ): del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def lowerCamelCase_ ( self: str ): from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class _lowercase ( _lowercase ): def lowerCamelCase_ ( self: List[str] ): super().setUp() def lowerCamelCase_ ( self: str ): del self.pipe gc.collect() torch.cuda.empty_cache() def lowerCamelCase_ ( self: Optional[int] ): lowerCamelCase__ : Union[str, Any] = pipeline( """text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass lowerCamelCase__ : List[str] = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class _lowercase ( _lowercase ): def lowerCamelCase_ ( self: Dict ): super().setUp() def lowerCamelCase_ ( self: Optional[Any] ): lowerCamelCase__ : int = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=UpperCamelCase__ , 
device_map="""balanced""" ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model lowerCamelCase__ : Dict = self.tokenizer(self.input_text , return_tensors="""pt""" ) # Second real batch lowerCamelCase__ : str = model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=UpperCamelCase__ ) , self.EXPECTED_OUTPUTS ) class _lowercase ( _lowercase ): def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : Any = """facebook/opt-350m""" super().setUp() def lowerCamelCase_ ( self: Any ): if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ): return # Step 1: freeze all parameters lowerCamelCase__ : Dict = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): lowerCamelCase__ : List[Any] = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. 
layernorm) to fp32 for stability lowerCamelCase__ : Optional[int] = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(UpperCamelCase__ ) ): lowerCamelCase__ : List[str] = LoRALayer(module.q_proj , rank=16 ) lowerCamelCase__ : List[str] = LoRALayer(module.k_proj , rank=16 ) lowerCamelCase__ : Tuple = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch lowerCamelCase__ : Any = self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): lowerCamelCase__ : str = model.forward(**UpperCamelCase__ ) out.logits.norm().backward() for module in model.modules(): if isinstance(UpperCamelCase__ , UpperCamelCase__ ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(UpperCamelCase__ , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class _lowercase ( _lowercase ): a = """gpt2-xl""" a = 3.31_91_85_48_54_15_21_87
631
"""Testing suite for the PyTorch ViTMAE model."""

import inspect
import math
import tempfile
import unittest

import numpy as np

from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTMAEForPreTraining, ViTMAEModel
    from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class ViTMAEModelTester:
    """Builds a tiny ViTMAE config and random inputs for the common model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1)))

    def prepare_config_and_inputs(self):
        """Return a (config, pixel_values, labels) triple of random test inputs."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ViTMAE does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
631
1
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"

# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration

mname = "facebook/wmt19-en-de"

tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
# shrink every dimension so the resulting checkpoint is tiny but structurally valid
config.update(
    dict(
        d_model=4,
        encoder_layers=1,
        decoder_layers=1,
        encoder_ffn_dim=4,
        decoder_ffn_dim=4,
        encoder_attention_heads=1,
        decoder_attention_heads=1,
    )
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-de
631
import os
from typing import Optional

import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE


class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of a compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        """
        Args:
            fo: path to the compressed file (possibly chained, e.g. "file.txt.gz::http://...").
            target_protocol: protocol of the underlying filesystem holding the compressed file.
            target_options: options forwarded to the underlying filesystem.
        """
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        # strip the compression extension to expose the single inner file, e.g. "file.txt" from "file.txt.gz"
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        # the archive contains exactly one entry: the uncompressed file
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    """Read contents of BZ2 file as a filesystem with one file inside."""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    """Read contents of GZIP file as a filesystem with one file inside."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    """Read contents of LZ4 file as a filesystem with one file inside."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    """Read contents of XZ file as a filesystem with one file inside."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    """Read contents of ZSTD file as a filesystem with one file inside."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            # proxy object whose close attribute is writable, delegating everything else
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
631
1
import os

import torch

from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version


if is_torch_version(">=", FSDP_PYTORCH_VERSION):
    import torch.distributed.checkpoint as dist_cp
    from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
    from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
    from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
    from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType


logger = get_logger(__name__)


def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    """Save an FSDP-wrapped model's state dict to `output_dir`, honoring the plugin's state-dict type."""
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            # full state dict is gathered on rank 0 only, so only rank 0 writes
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            # each rank saves its own shard under a rank-suffixed filename
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            # distributed checkpoint format: one directory per model
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")


def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    """Load a model state dict saved by `save_fsdp_model` back into the (FSDP-wrapped) model."""
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                # non-zero ranks rely on `sync_module_states` to receive the weights broadcast from rank 0
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)


def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    """Save an optimizer's FSDP state dict to `output_dir`, honoring the plugin's state-dict type."""
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")


def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    """Load an optimizer state saved by `save_fsdp_optimizer` back into `optimizer`."""
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly opytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        # convert the gathered/sharded optimizer state back to the flat-parameter layout FSDP expects
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
631
'''simple docstring''' import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() _A : int =logging.get_logger(__name__) def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> str: print("""Loading config file...""" ) def flatten_yaml_as_dict(UpperCamelCase , UpperCamelCase="" , UpperCamelCase="." ): lowerCamelCase__ : Optional[int] = [] for k, v in d.items(): lowerCamelCase__ : Optional[int] = parent_key + sep + k if parent_key else k if isinstance(UpperCamelCase , collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(UpperCamelCase , UpperCamelCase , sep=UpperCamelCase ).items() ) else: items.append((new_key, v) ) return dict(UpperCamelCase ) lowerCamelCase__ : Any = argparse.Namespace() with open(UpperCamelCase , """r""" ) as yaml_file: try: lowerCamelCase__ : int = yaml.load(UpperCamelCase , Loader=yaml.FullLoader ) lowerCamelCase__ : Tuple = flatten_yaml_as_dict(UpperCamelCase ) for k, v in flat_cfg.items(): setattr(UpperCamelCase , UpperCamelCase , UpperCamelCase ) except yaml.YAMLError as exc: logger.error("""Error while loading config file: {}. 
Error message: {}""".format(UpperCamelCase , str(UpperCamelCase ) ) ) return config def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Optional[Any]: lowerCamelCase__ : Union[str, Any] = MobileViTVaConfig() lowerCamelCase__ : str = False # dataset if task_name.startswith("""imagenet1k_""" ): lowerCamelCase__ : Optional[Any] = 1000 if int(task_name.strip().split("""_""" )[-1] ) == 384: lowerCamelCase__ : int = 384 else: lowerCamelCase__ : Optional[int] = 256 lowerCamelCase__ : str = """imagenet-1k-id2label.json""" elif task_name.startswith("""imagenet21k_to_1k_""" ): lowerCamelCase__ : Tuple = 21000 if int(task_name.strip().split("""_""" )[-1] ) == 384: lowerCamelCase__ : str = 384 else: lowerCamelCase__ : Any = 256 lowerCamelCase__ : int = """imagenet-22k-id2label.json""" elif task_name.startswith("""ade20k_""" ): lowerCamelCase__ : Dict = 151 lowerCamelCase__ : str = 512 lowerCamelCase__ : List[Any] = """ade20k-id2label.json""" lowerCamelCase__ : Union[str, Any] = True elif task_name.startswith("""voc_""" ): lowerCamelCase__ : Tuple = 21 lowerCamelCase__ : Optional[int] = 512 lowerCamelCase__ : List[Any] = """pascal-voc-id2label.json""" lowerCamelCase__ : Tuple = True # orig_config lowerCamelCase__ : Optional[int] = load_orig_config_file(UpperCamelCase ) assert getattr(UpperCamelCase , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model" lowerCamelCase__ : int = getattr(UpperCamelCase , """model.classification.mitv2.width_multiplier""" , 1.0 ) assert ( getattr(UpperCamelCase , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" lowerCamelCase__ : Tuple = getattr(UpperCamelCase , """model.classification.activation.name""" , """swish""" ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: lowerCamelCase__ : Any = getattr(UpperCamelCase , """model.segmentation.output_stride""" , 16 ) if "_deeplabv3" 
in task_name: lowerCamelCase__ : str = getattr(UpperCamelCase , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] ) lowerCamelCase__ : Tuple = getattr(UpperCamelCase , """model.segmentation.deeplabv3.aspp_out_channels""" , 512 ) lowerCamelCase__ : List[Any] = getattr(UpperCamelCase , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 ) # id2label lowerCamelCase__ : Tuple = """huggingface/label-files""" lowerCamelCase__ : Optional[Any] = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type="""dataset""" ) , """r""" ) ) lowerCamelCase__ : Union[str, Any] = {int(UpperCamelCase ): v for k, v in idalabel.items()} lowerCamelCase__ : int = idalabel lowerCamelCase__ : Optional[Any] = {v: k for k, v in idalabel.items()} return config def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Any: lowerCamelCase__ : List[Any] = dct.pop(UpperCamelCase ) lowerCamelCase__ : Dict = val def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase=False ) -> Tuple: if base_model: lowerCamelCase__ : Optional[int] = """""" else: lowerCamelCase__ : Optional[Any] = """mobilevitv2.""" lowerCamelCase__ : List[str] = [] for k in state_dict.keys(): if k[:8] == "encoder.": lowerCamelCase__ : Optional[Any] = k[8:] else: lowerCamelCase__ : Optional[Any] = k if ".block." in k: lowerCamelCase__ : Dict = k_new.replace(""".block.""" , """.""" ) if ".conv." in k: lowerCamelCase__ : List[Any] = k_new.replace(""".conv.""" , """.convolution.""" ) if ".norm." in k: lowerCamelCase__ : str = k_new.replace(""".norm.""" , """.normalization.""" ) if "conv_1." in k: lowerCamelCase__ : Any = k_new.replace("""conv_1.""" , f'''{model_prefix}conv_stem.''' ) for i in [1, 2]: if f'''layer_{i}.''' in k: lowerCamelCase__ : Optional[Any] = k_new.replace(f'''layer_{i}.''' , f'''{model_prefix}encoder.layer.{i-1}.layer.''' ) if ".exp_1x1." in k: lowerCamelCase__ : Dict = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" ) if ".red_1x1." 
in k: lowerCamelCase__ : str = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" ) for i in [3, 4, 5]: if f'''layer_{i}.0.''' in k: lowerCamelCase__ : List[str] = k_new.replace(f'''layer_{i}.0.''' , f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' ) if f'''layer_{i}.1.local_rep.0.''' in k: lowerCamelCase__ : Optional[Any] = k_new.replace(f'''layer_{i}.1.local_rep.0.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' ) if f'''layer_{i}.1.local_rep.1.''' in k: lowerCamelCase__ : Dict = k_new.replace(f'''layer_{i}.1.local_rep.1.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' ) for i in [3, 4, 5]: if i == 3: lowerCamelCase__ : int = [0, 1] elif i == 4: lowerCamelCase__ : str = [0, 1, 2, 3] elif i == 5: lowerCamelCase__ : Dict = [0, 1, 2] for j in j_in: if f'''layer_{i}.1.global_rep.{j}.''' in k: lowerCamelCase__ : List[Any] = k_new.replace( f'''layer_{i}.1.global_rep.{j}.''' , f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' ) if f'''layer_{i}.1.global_rep.{j+1}.''' in k: lowerCamelCase__ : Optional[int] = k_new.replace( f'''layer_{i}.1.global_rep.{j+1}.''' , f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' ) if f'''layer_{i}.1.conv_proj.''' in k: lowerCamelCase__ : Optional[int] = k_new.replace(f'''layer_{i}.1.conv_proj.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' ) if "pre_norm_attn.0." in k: lowerCamelCase__ : str = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" ) if "pre_norm_attn.1." in k: lowerCamelCase__ : str = k_new.replace("""pre_norm_attn.1.""" , """attention.""" ) if "pre_norm_ffn.0." in k: lowerCamelCase__ : Optional[Any] = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" ) if "pre_norm_ffn.1." in k: lowerCamelCase__ : Union[str, Any] = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" ) if "pre_norm_ffn.3." in k: lowerCamelCase__ : List[Any] = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" ) if "classifier.1." 
in k: lowerCamelCase__ : Tuple = k_new.replace("""classifier.1.""" , """classifier.""" ) if "seg_head." in k: lowerCamelCase__ : Optional[int] = k_new.replace("""seg_head.""" , """segmentation_head.""" ) if ".aspp_layer." in k: lowerCamelCase__ : Any = k_new.replace(""".aspp_layer.""" , """.""" ) if ".aspp_pool." in k: lowerCamelCase__ : Any = k_new.replace(""".aspp_pool.""" , """.""" ) rename_keys.append((k, k_new) ) return rename_keys def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> List[str]: lowerCamelCase__ : Union[str, Any] = [] for k in state_dict.keys(): if k.startswith("""seg_head.aux_head.""" ): keys_to_ignore.append(UpperCamelCase ) for k in keys_to_ignore: state_dict.pop(UpperCamelCase , UpperCamelCase ) def SCREAMING_SNAKE_CASE_ () -> Dict: lowerCamelCase__ : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" lowerCamelCase__ : Tuple = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw ) return im @torch.no_grad() def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict: lowerCamelCase__ : str = get_mobilevitva_config(UpperCamelCase , UpperCamelCase ) # load original state_dict lowerCamelCase__ : List[str] = torch.load(UpperCamelCase , map_location="""cpu""" ) # load huggingface model if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ): lowerCamelCase__ : int = MobileViTVaForSemanticSegmentation(UpperCamelCase ).eval() lowerCamelCase__ : Tuple = False else: lowerCamelCase__ : int = MobileViTVaForImageClassification(UpperCamelCase ).eval() lowerCamelCase__ : Optional[Any] = False # remove and rename some keys of load the original model lowerCamelCase__ : Tuple = checkpoint remove_unused_keys(UpperCamelCase ) lowerCamelCase__ : Union[str, Any] = create_rename_keys(UpperCamelCase , base_model=UpperCamelCase ) for rename_key_src, 
rename_key_dest in rename_keys: rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase ) # load modified state_dict model.load_state_dict(UpperCamelCase ) # Check outputs on an image, prepared by MobileViTImageProcessor lowerCamelCase__ : int = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) lowerCamelCase__ : Dict = image_processor(images=prepare_img() , return_tensors="""pt""" ) lowerCamelCase__ : str = model(**UpperCamelCase ) # verify classification model if task_name.startswith("""imagenet""" ): lowerCamelCase__ : Dict = outputs.logits lowerCamelCase__ : Optional[Any] = logits.argmax(-1 ).item() print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] ) if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0: # expected_logits for base variant lowerCamelCase__ : Optional[Any] = torch.tensor([-1.63_36E00, -7.32_04E-02, -5.18_83E-01] ) assert torch.allclose(logits[0, :3] , UpperCamelCase , atol=1E-4 ) Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase ) print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(UpperCamelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(UpperCamelCase ) if __name__ == "__main__": _A : Optional[int] =argparse.ArgumentParser() # Required parameters parser.add_argument( '''--task''', default='''imagenet1k_256''', type=str, help=( '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . 
''' ''' Classification (ImageNet-1k) - MobileViTV2 (256x256) : imagenet1k_256 - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384 - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) : imagenet21k_to_1k_256 - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on ImageNet-1k 384x384) : imagenet21k_to_1k_384 Segmentation - ADE20K Dataset : ade20k_deeplabv3 - Pascal VOC 2012 Dataset: voc_deeplabv3 ''' ), choices=[ '''imagenet1k_256''', '''imagenet1k_384''', '''imagenet21k_to_1k_256''', '''imagenet21k_to_1k_384''', '''ade20k_deeplabv3''', '''voc_deeplabv3''', ], ) parser.add_argument( '''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).''' ) parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) _A : Dict =parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
631
1
'''simple docstring''' import contextlib import importlib import io import unittest import transformers # Try to import everything from transformers to ensure every object can be loaded. from transformers import * # noqa F406 from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available if is_torch_available(): from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification if is_tf_available(): from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification if is_flax_available(): from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification _A : Dict =DUMMY_UNKNOWN_IDENTIFIER # An actual model hosted on huggingface.co _A : Tuple ='''main''' # Default branch name _A : Dict ='''f2c752cfc5c0ab6f4bdec59acea69eefbee381c2''' # One particular commit (not the top of `main`) _A : Dict ='''aaaaaaa''' # This commit does not exist, so we should 404. _A : Optional[Any] ='''d9e9f15bc825e4b2c9249e9578f884bbcb5e3684''' # Sha-1 of config.json on the top of `main`, for checking purposes _A : List[str] ='''4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3''' @contextlib.contextmanager def SCREAMING_SNAKE_CASE_ () -> List[str]: print("""Welcome!""" ) yield print("""Bye!""" ) @contextlib.contextmanager def SCREAMING_SNAKE_CASE_ () -> Tuple: print("""Bonjour!""" ) yield print("""Au revoir!""" ) class _lowercase ( unittest.TestCase ): def lowerCamelCase_ ( self: Dict ): # If the spec is missing, importlib would not be able to import the module dynamically. 
assert transformers.__spec__ is not None assert importlib.util.find_spec("""transformers""" ) is not None class _lowercase ( unittest.TestCase ): @unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO ) def lowerCamelCase_ ( self: int , UpperCamelCase__: List[str] ): with ContextManagers([] ): print("""Transformers are awesome!""" ) # The print statement adds a new line at the end of the output self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" ) @unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO ) def lowerCamelCase_ ( self: Any , UpperCamelCase__: Optional[int] ): with ContextManagers([context_en()] ): print("""Transformers are awesome!""" ) # The output should be wrapped with an English welcome and goodbye self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" ) @unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO ) def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: Any ): with ContextManagers([context_fr(), context_en()] ): print("""Transformers are awesome!""" ) # The output should be wrapped with an English and French welcome and goodbye self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" ) @require_torch def lowerCamelCase_ ( self: Union[str, Any] ): self.assertEqual(find_labels(UpperCamelCase__ ) , ["""labels"""] ) self.assertEqual(find_labels(UpperCamelCase__ ) , ["""labels""", """next_sentence_label"""] ) self.assertEqual(find_labels(UpperCamelCase__ ) , ["""start_positions""", """end_positions"""] ) class _lowercase ( _lowercase ): pass self.assertEqual(find_labels(UpperCamelCase__ ) , ["""labels"""] ) @require_tf def lowerCamelCase_ ( self: List[str] ): self.assertEqual(find_labels(UpperCamelCase__ ) , ["""labels"""] ) self.assertEqual(find_labels(UpperCamelCase__ ) , ["""labels""", """next_sentence_label"""] ) self.assertEqual(find_labels(UpperCamelCase__ ) , ["""start_positions""", 
"""end_positions"""] ) class _lowercase ( _lowercase ): pass self.assertEqual(find_labels(UpperCamelCase__ ) , ["""labels"""] ) @require_flax def lowerCamelCase_ ( self: Dict ): # Flax models don't have labels self.assertEqual(find_labels(UpperCamelCase__ ) , [] ) self.assertEqual(find_labels(UpperCamelCase__ ) , [] ) self.assertEqual(find_labels(UpperCamelCase__ ) , [] ) class _lowercase ( _lowercase ): pass self.assertEqual(find_labels(UpperCamelCase__ ) , [] )
631
'''simple docstring''' import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class _lowercase ( _lowercase ): def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : Dict = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(UpperCamelCase__ , """width_multiplier""" ) ) class _lowercase : def __init__( self: str , UpperCamelCase__: Optional[int] , UpperCamelCase__: str=13 , UpperCamelCase__: Any=64 , UpperCamelCase__: Optional[Any]=2 , UpperCamelCase__: str=3 , UpperCamelCase__: List[str]="swish" , UpperCamelCase__: Any=3 , UpperCamelCase__: Optional[int]=32 , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: int=0.02 , UpperCamelCase__: Dict=True , UpperCamelCase__: Dict=True , UpperCamelCase__: Any=10 , UpperCamelCase__: int=None , UpperCamelCase__: List[Any]=0.25 , UpperCamelCase__: str=0.0 , UpperCamelCase__: Optional[int]=0.0 , ): lowerCamelCase__ : Any = parent lowerCamelCase__ : Optional[Any] = batch_size lowerCamelCase__ : Optional[int] = image_size lowerCamelCase__ : str = patch_size lowerCamelCase__ : Optional[int] = num_channels lowerCamelCase__ : Optional[Any] = make_divisible(512 * width_multiplier , divisor=8 ) lowerCamelCase__ : List[str] = hidden_act 
lowerCamelCase__ : Any = conv_kernel_size lowerCamelCase__ : Any = output_stride lowerCamelCase__ : Union[str, Any] = classifier_dropout_prob lowerCamelCase__ : List[str] = use_labels lowerCamelCase__ : Optional[Any] = is_training lowerCamelCase__ : List[str] = num_labels lowerCamelCase__ : Dict = initializer_range lowerCamelCase__ : List[Any] = scope lowerCamelCase__ : Tuple = width_multiplier lowerCamelCase__ : List[Any] = ffn_dropout lowerCamelCase__ : Any = attn_dropout def lowerCamelCase_ ( self: Dict ): lowerCamelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ : Tuple = None lowerCamelCase__ : Optional[Any] = None if self.use_labels: lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_labels ) lowerCamelCase__ : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) lowerCamelCase__ : Union[str, Any] = self.get_config() return config, pixel_values, labels, pixel_labels def lowerCamelCase_ ( self: List[Any] ): return MobileViTVaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , ) def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: int , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] ): lowerCamelCase__ : Union[str, Any] = MobileViTVaModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : str = model(UpperCamelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // 
self.output_stride, ) , ) def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Tuple ): lowerCamelCase__ : Tuple = self.num_labels lowerCamelCase__ : Dict = MobileViTVaForImageClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : int = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Any , UpperCamelCase__: Optional[Any] , UpperCamelCase__: str ): lowerCamelCase__ : List[str] = self.num_labels lowerCamelCase__ : Union[str, Any] = MobileViTVaForSemanticSegmentation(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : Tuple = model(UpperCamelCase__ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ : Any = self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = config_and_inputs lowerCamelCase__ : Optional[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _lowercase ( _lowercase , _lowercase , unittest.TestCase ): a = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) a = ( { """feature-extraction""": MobileViTVaModel, """image-classification""": MobileViTVaForImageClassification, """image-segmentation""": 
MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) a = False a = False a = False a = False def lowerCamelCase_ ( self: Optional[int] ): lowerCamelCase__ : Tuple = MobileViTVaModelTester(self ) lowerCamelCase__ : List[str] = MobileViTVaConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ ) def lowerCamelCase_ ( self: Tuple ): self.config_tester.run_common_tests() @unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" ) def lowerCamelCase_ ( self: int ): pass @unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" ) def lowerCamelCase_ ( self: List[str] ): pass @unittest.skip(reason="""MobileViTV2 does not output attentions""" ) def lowerCamelCase_ ( self: Union[str, Any] ): pass @require_torch_multi_gpu @unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" ) def lowerCamelCase_ ( self: int ): pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowerCamelCase_ ( self: Tuple ): pass def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ ) lowerCamelCase__ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ : Tuple = [*signature.parameters.keys()] lowerCamelCase__ : str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def lowerCamelCase_ ( self: str ): lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def lowerCamelCase_ ( self: List[str] ): def check_hidden_states_output(UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any] ): lowerCamelCase__ : 
List[Any] = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): lowerCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) lowerCamelCase__ : Optional[int] = outputs.hidden_states lowerCamelCase__ : List[Any] = 5 self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. lowerCamelCase__ : int = 2 for i in range(len(UpperCamelCase__ ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) lowerCamelCase__ , lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : int = True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase__ : str = True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Dict ): lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase__ ) @slow def lowerCamelCase_ ( self: Union[str, Any] ): for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : Union[str, Any] = MobileViTVaModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE_ () -> Optional[int]: lowerCamelCase__ : str = 
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class _lowercase ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self: Tuple ): return ( MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ) if is_vision_available() else None ) @slow def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : Optional[Any] = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to( UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = self.default_image_processor lowerCamelCase__ : List[Any] = prepare_img() lowerCamelCase__ : Any = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): lowerCamelCase__ : int = model(**UpperCamelCase__ ) # verify the logits lowerCamelCase__ : str = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) lowerCamelCase__ : Optional[int] = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) ) @slow def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : int = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) lowerCamelCase__ : Optional[Any] = model.to(UpperCamelCase__ ) lowerCamelCase__ : Any = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) lowerCamelCase__ : Union[str, Any] = prepare_img() lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): lowerCamelCase__ : Optional[Any] = model(**UpperCamelCase__ ) lowerCamelCase__ : str = outputs.logits # verify the logits lowerCamelCase__ : List[str] = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , UpperCamelCase__ ) lowerCamelCase__ : Any = 
torch.tensor( [ [[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]], [[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]], [[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]], ] , device=UpperCamelCase__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1e-4 ) ) @slow def lowerCamelCase_ ( self: Optional[int] ): lowerCamelCase__ : Optional[Any] = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) lowerCamelCase__ : List[Any] = model.to(UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) lowerCamelCase__ : Optional[Any] = prepare_img() lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): lowerCamelCase__ : Dict = model(**UpperCamelCase__ ) lowerCamelCase__ : List[str] = outputs.logits.detach().cpu() lowerCamelCase__ : List[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(50, 60)] ) lowerCamelCase__ : int = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , UpperCamelCase__ ) lowerCamelCase__ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ ) lowerCamelCase__ : int = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
631
1
'''simple docstring''' import inspect import math import tempfile import unittest import numpy as np from transformers import ViTMAEConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMAEForPreTraining, ViTMAEModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _lowercase : def __init__( self: List[str] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[str]=13 , UpperCamelCase__: Optional[int]=30 , UpperCamelCase__: Optional[int]=2 , UpperCamelCase__: List[str]=3 , UpperCamelCase__: List[str]=True , UpperCamelCase__: Any=True , UpperCamelCase__: List[Any]=32 , UpperCamelCase__: Any=5 , UpperCamelCase__: Optional[Any]=4 , UpperCamelCase__: Dict=37 , UpperCamelCase__: List[Any]="gelu" , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: List[Any]=10 , UpperCamelCase__: Tuple=0.02 , UpperCamelCase__: Optional[int]=3 , UpperCamelCase__: Dict=0.6 , UpperCamelCase__: int=None , ): lowerCamelCase__ : Dict = parent lowerCamelCase__ : Optional[Any] = batch_size lowerCamelCase__ : Optional[int] = image_size lowerCamelCase__ : Optional[Any] = patch_size lowerCamelCase__ : Any = num_channels lowerCamelCase__ : Any = is_training lowerCamelCase__ : Union[str, Any] = use_labels lowerCamelCase__ : List[str] = hidden_size lowerCamelCase__ : List[str] = num_hidden_layers lowerCamelCase__ : List[Any] = num_attention_heads lowerCamelCase__ : str = intermediate_size lowerCamelCase__ : str = 
hidden_act lowerCamelCase__ : Any = hidden_dropout_prob lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob lowerCamelCase__ : List[Any] = type_sequence_label_size lowerCamelCase__ : int = initializer_range lowerCamelCase__ : List[str] = mask_ratio lowerCamelCase__ : List[Any] = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) lowerCamelCase__ : str = (image_size // patch_size) ** 2 lowerCamelCase__ : Optional[int] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def lowerCamelCase_ ( self: Optional[int] ): lowerCamelCase__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ : Optional[int] = None if self.use_labels: lowerCamelCase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : Any = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self: str ): return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[int] , UpperCamelCase__: int ): lowerCamelCase__ : Tuple = ViTMAEModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : List[Any] = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase_ ( self: str , UpperCamelCase__: Union[str, 
Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Dict ): lowerCamelCase__ : int = ViTMAEForPreTraining(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ ) lowerCamelCase__ : Any = (self.image_size // self.patch_size) ** 2 lowerCamelCase__ : str = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images lowerCamelCase__ : Dict = 1 lowerCamelCase__ : Optional[int] = ViTMAEForPreTraining(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ ) lowerCamelCase__ : Tuple = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : Optional[int] = self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any = config_and_inputs lowerCamelCase__ : Optional[int] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _lowercase ( _lowercase , _lowercase , unittest.TestCase ): a = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else () a = {"""feature-extraction""": ViTMAEModel} if is_torch_available() else {} a = False a = False a = False a = False def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : Tuple = ViTMAEModelTester(self ) lowerCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 ) def lowerCamelCase_ ( self: Union[str, Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" ) def lowerCamelCase_ ( self: Dict ): pass def lowerCamelCase_ ( self: List[str] ): lowerCamelCase__ , lowerCamelCase__ : str = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : str = model_class(UpperCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCamelCase__ : Optional[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) ) def lowerCamelCase_ ( self: List[Any] ): lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Any = model_class(UpperCamelCase__ ) lowerCamelCase__ : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ : Any = [*signature.parameters.keys()] lowerCamelCase__ : List[str] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Dict , UpperCamelCase__: Optional[int] ): # make masks reproducible np.random.seed(2 ) lowerCamelCase__ : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 ) lowerCamelCase__ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowerCamelCase__ : Tuple = torch.from_numpy(UpperCamelCase__ ) # Add `noise` argument. 
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument lowerCamelCase__ : Tuple = pt_noise super().check_pt_tf_models(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def lowerCamelCase_ ( self: str ): lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): lowerCamelCase__ : Optional[int] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) lowerCamelCase__ : Optional[int] = outputs[0].cpu().numpy() lowerCamelCase__ : List[str] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase__ ) lowerCamelCase__ : List[str] = model_class.from_pretrained(UpperCamelCase__ ) model.to(UpperCamelCase__ ) # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): lowerCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) # Make sure we don't have nans lowerCamelCase__ : Dict = after_outputs[0].cpu().numpy() lowerCamelCase__ : Tuple = 0 lowerCamelCase__ : Union[str, Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(UpperCamelCase__ , 1e-5 ) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def lowerCamelCase_ ( self: int ): pass @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def lowerCamelCase_ ( self: Any ): pass @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. 
See test_save_load to get deterministic results.""" ) def lowerCamelCase_ ( self: List[str] ): pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" ) def lowerCamelCase_ ( self: Tuple ): pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowerCamelCase_ ( self: Optional[int] ): pass @slow def lowerCamelCase_ ( self: List[str] ): for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : Tuple = ViTMAEModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE_ () -> List[Any]: lowerCamelCase__ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class _lowercase ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self: List[str] ): return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None @slow def lowerCamelCase_ ( self: Tuple ): # make random mask reproducible across the PT and TF model np.random.seed(2 ) lowerCamelCase__ : str = ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(UpperCamelCase__ ) lowerCamelCase__ : Tuple = self.default_image_processor lowerCamelCase__ : List[str] = prepare_img() lowerCamelCase__ : int = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) lowerCamelCase__ : List[str] = ViTMAEConfig() lowerCamelCase__ : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) lowerCamelCase__ : Any = np.random.uniform(size=(1, num_patches) ) # forward pass with torch.no_grad(): lowerCamelCase__ : List[Any] = model(**UpperCamelCase__ , noise=torch.from_numpy(UpperCamelCase__ ).to(device=UpperCamelCase__ ) 
) # verify the logits lowerCamelCase__ : List[str] = torch.Size((1, 196, 768) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) lowerCamelCase__ : str = torch.tensor( [[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(UpperCamelCase__ ) , atol=1e-4 ) )
631
'''simple docstring''' import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _A : Optional[Any] =logging.get_logger(__name__) _A : Dict ={'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} _A : Tuple ={ '''tokenizer_file''': { '''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''', }, } _A : List[Any] ={ '''gpt-neox-20b''': 2_048, } class _lowercase ( _lowercase ): a = VOCAB_FILES_NAMES a = PRETRAINED_VOCAB_FILES_MAP a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a = ["""input_ids""", """attention_mask"""] def __init__( self: Optional[int] , UpperCamelCase__: Union[str, Any]=None , UpperCamelCase__: int=None , UpperCamelCase__: Tuple=None , UpperCamelCase__: Any="<|endoftext|>" , UpperCamelCase__: Any="<|endoftext|>" , UpperCamelCase__: Union[str, Any]="<|endoftext|>" , UpperCamelCase__: Tuple=False , **UpperCamelCase__: str , ): super().__init__( UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , unk_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , ) lowerCamelCase__ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , UpperCamelCase__ ) != add_prefix_space: lowerCamelCase__ : Any = getattr(UpperCamelCase__ , pre_tok_state.pop("""type""" ) ) lowerCamelCase__ : Dict = add_prefix_space lowerCamelCase__ : Optional[int] = pre_tok_class(**UpperCamelCase__ ) lowerCamelCase__ : Dict = add_prefix_space def lowerCamelCase_ ( self: int , UpperCamelCase__: str , UpperCamelCase__: Optional[str] = None ): lowerCamelCase__ : Optional[Any] = 
self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ ) return tuple(UpperCamelCase__ ) def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: "Conversation" ): lowerCamelCase__ : str = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) + [self.eos_token_id] ) if len(UpperCamelCase__ ) > self.model_max_length: lowerCamelCase__ : int = input_ids[-self.model_max_length :] return input_ids
631
1
'''simple docstring''' import importlib import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Union import torch from ..utils import BaseOutput _A : Dict ='''scheduler_config.json''' class _lowercase ( _lowercase ): a = 1 a = 2 a = 3 a = 4 a = 5 a = 6 a = 7 a = 8 a = 9 a = 10 a = 11 a = 12 a = 13 a = 14 @dataclass class _lowercase ( _lowercase ): a = 42 class _lowercase : a = SCHEDULER_CONFIG_NAME a = [] a = True @classmethod def lowerCamelCase_ ( cls: int , UpperCamelCase__: Dict[str, Any] = None , UpperCamelCase__: Optional[str] = None , UpperCamelCase__: Optional[Any]=False , **UpperCamelCase__: Union[str, Any] , ): lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict = cls.load_config( pretrained_model_name_or_path=UpperCamelCase__ , subfolder=UpperCamelCase__ , return_unused_kwargs=UpperCamelCase__ , return_commit_hash=UpperCamelCase__ , **UpperCamelCase__ , ) return cls.from_config(UpperCamelCase__ , return_unused_kwargs=UpperCamelCase__ , **UpperCamelCase__ ) def lowerCamelCase_ ( self: Any , UpperCamelCase__: Union[str, os.PathLike] , UpperCamelCase__: bool = False , **UpperCamelCase__: Optional[int] ): self.save_config(save_directory=UpperCamelCase__ , push_to_hub=UpperCamelCase__ , **UpperCamelCase__ ) @property def lowerCamelCase_ ( self: str ): return self._get_compatibles() @classmethod def lowerCamelCase_ ( cls: str ): lowerCamelCase__ : Union[str, Any] = list(set([cls.__name__] + cls._compatibles ) ) lowerCamelCase__ : Optional[int] = importlib.import_module(__name__.split(""".""" )[0] ) lowerCamelCase__ : List[str] = [ getattr(UpperCamelCase__ , UpperCamelCase__ ) for c in compatible_classes_str if hasattr(UpperCamelCase__ , UpperCamelCase__ ) ] return compatible_classes
631
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _A : Dict ={ '''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''], '''processing_git''': ['''GitProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : Union[str, Any] =[ '''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GitForCausalLM''', '''GitModel''', '''GitPreTrainedModel''', '''GitVisionModel''', ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys _A : Dict =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
631
1
'''simple docstring''' from maths.prime_factors import prime_factors def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> int: if not isinstance(UpperCamelCase , UpperCamelCase ): lowerCamelCase__ : Dict = f'''Input value of [number={number}] must be an integer''' raise TypeError(UpperCamelCase ) if number < 1: raise ValueError("""Input must be a positive integer""" ) return -1 if len(prime_factors(UpperCamelCase ) ) % 2 else 1 if __name__ == "__main__": import doctest doctest.testmod()
631
'''simple docstring''' import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin _A : int =get_tests_dir('''fixtures/test_sentencepiece.model''') _A : Tuple ={'''target_lang''': '''fi''', '''source_lang''': '''en'''} _A : int ='''>>zh<<''' _A : Dict ='''Helsinki-NLP/''' if is_torch_available(): _A : List[Any] ='''pt''' elif is_tf_available(): _A : Optional[int] ='''tf''' else: _A : Dict ='''jax''' @require_sentencepiece class _lowercase ( _lowercase , unittest.TestCase ): a = MarianTokenizer a = False a = True def lowerCamelCase_ ( self: List[str] ): super().setUp() lowerCamelCase__ : List[Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""] lowerCamelCase__ : Optional[Any] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) ) lowerCamelCase__ : Optional[int] = Path(self.tmpdirname ) save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] ) save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] ) copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] ) lowerCamelCase__ : Dict = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase_ ( self: Optional[Any] , **UpperCamelCase__: Any ): return MarianTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def lowerCamelCase_ ( self: str , 
UpperCamelCase__: List[str] ): return ( "This is a test", "This is a test", ) def lowerCamelCase_ ( self: Optional[Any] ): lowerCamelCase__ : Any = """</s>""" lowerCamelCase__ : List[Any] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ ) def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : int = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """</s>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """<pad>""" ) self.assertEqual(len(UpperCamelCase__ ) , 9 ) def lowerCamelCase_ ( self: int ): self.assertEqual(self.get_tokenizer().vocab_size , 9 ) def lowerCamelCase_ ( self: int ): lowerCamelCase__ : List[Any] = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' ) lowerCamelCase__ : Optional[int] = en_de_tokenizer(["""I am a small frog"""] , return_tensors=UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : Optional[Any] = [38, 121, 14, 697, 38_848, 0] self.assertListEqual(UpperCamelCase__ , batch.input_ids[0] ) lowerCamelCase__ : List[str] = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(UpperCamelCase__ ) lowerCamelCase__ : Tuple = [x.name for x in Path(UpperCamelCase__ ).glob("""*""" )] self.assertIn("""source.spm""" , UpperCamelCase__ ) MarianTokenizer.from_pretrained(UpperCamelCase__ ) def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ : List[Any] = self.get_tokenizer() lowerCamelCase__ : Any = tok( ["""I am a small frog""" * 1_000, """I am a small frog"""] , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(batch.input_ids.shape , (2, 512) ) def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : str = self.get_tokenizer() lowerCamelCase__ : 
Dict = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(batch_smaller.input_ids.shape , (2, 10) ) @slow def lowerCamelCase_ ( self: List[str] ): # fmt: off lowerCamelCase__ : int = {"""input_ids""": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 
58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , ) def lowerCamelCase_ ( self: List[str] ): lowerCamelCase__ : Union[str, Any] = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" ) lowerCamelCase__ : str = """Tämä on testi""" lowerCamelCase__ : Any = """This is a test""" lowerCamelCase__ : int = [76, 7, 2_047, 2] lowerCamelCase__ : List[str] = [69, 12, 11, 940, 2] 
lowerCamelCase__ : Tuple = tokenizer(UpperCamelCase__ ).input_ids self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = tokenizer(text_target=UpperCamelCase__ ).input_ids self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : Tuple = tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
631
1
'''simple docstring''' from scipy.stats import spearmanr import datasets _A : Tuple =''' The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. ''' _A : Union[str, Any] =''' Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {\'spearmanr\': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... 
return_pvalue=True) >>> print(results[\'spearmanr\']) -0.7 >>> print(round(results[\'spearmanr_pvalue\'], 2)) 0.19 ''' _A : Optional[Any] =r'''\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _lowercase ( datasets.Metric ): def lowerCamelCase_ ( self: Any ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""float""" ), """references""": datasets.Value("""float""" ), } ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""] , ) def lowerCamelCase_ ( self: Tuple , UpperCamelCase__: str , UpperCamelCase__: Dict , UpperCamelCase__: Union[str, Any]=False ): lowerCamelCase__ : Optional[Any] = spearmanr(UpperCamelCase__ , UpperCamelCase__ ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
631
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _A : Optional[Any] =logging.get_logger(__name__) _A : Optional[int] ={ '''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''', '''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''', '''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''', '''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''', '''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''', '''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''', '''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''', '''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''', '''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''', '''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''', } class _lowercase ( _lowercase ): a = """rwkv""" a = {"""max_position_embeddings""": """context_length"""} def __init__( self: Tuple , UpperCamelCase__: Optional[Any]=50_277 , UpperCamelCase__: Union[str, Any]=1_024 , UpperCamelCase__: Tuple=4_096 , UpperCamelCase__: List[Any]=32 , UpperCamelCase__: Dict=None , UpperCamelCase__: Dict=None , UpperCamelCase__: int=1e-5 , UpperCamelCase__: Any=0 , UpperCamelCase__: str=0 , UpperCamelCase__: Union[str, Any]=6 , UpperCamelCase__: Optional[int]=False , UpperCamelCase__: Dict=True , **UpperCamelCase__: Dict , ): lowerCamelCase__ : Dict = vocab_size lowerCamelCase__ : Optional[Any] = context_length lowerCamelCase__ : Optional[Any] = hidden_size lowerCamelCase__ : Any = num_hidden_layers lowerCamelCase__ : int = attention_hidden_size if 
attention_hidden_size is not None else hidden_size lowerCamelCase__ : Union[str, Any] = intermediate_size if intermediate_size is not None else 4 * hidden_size lowerCamelCase__ : List[str] = layer_norm_epsilon lowerCamelCase__ : int = rescale_every lowerCamelCase__ : Optional[int] = use_cache lowerCamelCase__ : Dict = bos_token_id lowerCamelCase__ : Any = eos_token_id super().__init__( tie_word_embeddings=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
631
1
'''simple docstring''' from timeit import timeit def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> int: if number < 0: raise ValueError("""the value of input must not be negative""" ) lowerCamelCase__ : Optional[int] = 0 while number: number &= number - 1 result += 1 return result def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> int: if number < 0: raise ValueError("""the value of input must not be negative""" ) lowerCamelCase__ : Optional[Any] = 0 while number: if number % 2 == 1: result += 1 number >>= 1 return result def SCREAMING_SNAKE_CASE_ () -> None: def do_benchmark(UpperCamelCase ) -> None: lowerCamelCase__ : Union[str, Any] = """import __main__ as z""" print(f'''Benchmark when {number = }:''' ) print(f'''{get_set_bits_count_using_modulo_operator(UpperCamelCase ) = }''' ) lowerCamelCase__ : Union[str, Any] = timeit("""z.get_set_bits_count_using_modulo_operator(25)""" , setup=UpperCamelCase ) print(f'''timeit() runs in {timing} seconds''' ) print(f'''{get_set_bits_count_using_brian_kernighans_algorithm(UpperCamelCase ) = }''' ) lowerCamelCase__ : int = timeit( """z.get_set_bits_count_using_brian_kernighans_algorithm(25)""" , setup=UpperCamelCase , ) print(f'''timeit() runs in {timing} seconds''' ) for number in (25, 37, 58, 0): do_benchmark(UpperCamelCase ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
631
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _A : str =logging.get_logger(__name__) _A : int ={ '''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''', } class _lowercase ( _lowercase ): a = """roc_bert""" def __init__( self: Optional[Any] , UpperCamelCase__: Any=30_522 , UpperCamelCase__: Optional[Any]=768 , UpperCamelCase__: Union[str, Any]=12 , UpperCamelCase__: Tuple=12 , UpperCamelCase__: Tuple=3_072 , UpperCamelCase__: str="gelu" , UpperCamelCase__: List[Any]=0.1 , UpperCamelCase__: List[str]=0.1 , UpperCamelCase__: Dict=512 , UpperCamelCase__: str=2 , UpperCamelCase__: str=0.02 , UpperCamelCase__: Tuple=1e-12 , UpperCamelCase__: Any=True , UpperCamelCase__: Union[str, Any]=0 , UpperCamelCase__: List[Any]="absolute" , UpperCamelCase__: Any=None , UpperCamelCase__: Any=True , UpperCamelCase__: Optional[int]=True , UpperCamelCase__: Union[str, Any]=768 , UpperCamelCase__: int=910 , UpperCamelCase__: Tuple=512 , UpperCamelCase__: int=24_858 , UpperCamelCase__: Optional[Any]=True , **UpperCamelCase__: Optional[Any] , ): lowerCamelCase__ : Optional[Any] = vocab_size lowerCamelCase__ : Tuple = max_position_embeddings lowerCamelCase__ : List[Any] = hidden_size lowerCamelCase__ : int = num_hidden_layers lowerCamelCase__ : Tuple = num_attention_heads lowerCamelCase__ : Any = intermediate_size lowerCamelCase__ : List[str] = hidden_act lowerCamelCase__ : str = hidden_dropout_prob lowerCamelCase__ : Dict = attention_probs_dropout_prob lowerCamelCase__ : Union[str, Any] = initializer_range lowerCamelCase__ : Tuple = type_vocab_size lowerCamelCase__ : Optional[Any] = layer_norm_eps lowerCamelCase__ : List[Any] = use_cache lowerCamelCase__ : Tuple = enable_pronunciation lowerCamelCase__ : Union[str, Any] = enable_shape lowerCamelCase__ : Union[str, Any] = pronunciation_embed_dim lowerCamelCase__ : Any = pronunciation_vocab_size lowerCamelCase__ : int = 
shape_embed_dim lowerCamelCase__ : Tuple = shape_vocab_size lowerCamelCase__ : Optional[Any] = concat_input lowerCamelCase__ : str = position_embedding_type lowerCamelCase__ : Dict = classifier_dropout super().__init__(pad_token_id=UpperCamelCase__ , **UpperCamelCase__ )
631
1
'''simple docstring''' import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class _lowercase ( _lowercase ): def lowerCamelCase_ ( self: Optional[int] ): lowerCamelCase__ : Optional[Any] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(UpperCamelCase__ , """tf_padding""" ) ) self.parent.assertTrue(hasattr(UpperCamelCase__ , """depth_multiplier""" ) ) class _lowercase : def __init__( self: Tuple , UpperCamelCase__: str , UpperCamelCase__: List[str]=13 , UpperCamelCase__: Optional[Any]=3 , UpperCamelCase__: str=32 , UpperCamelCase__: int=0.25 , UpperCamelCase__: Dict=8 , UpperCamelCase__: List[Any]=True , UpperCamelCase__: Optional[int]=1_024 , UpperCamelCase__: Tuple=32 , UpperCamelCase__: Union[str, Any]="relu6" , UpperCamelCase__: List[str]=0.1 , UpperCamelCase__: str=0.02 , UpperCamelCase__: Optional[int]=True , UpperCamelCase__: int=True , UpperCamelCase__: Dict=10 , UpperCamelCase__: Dict=None , ): lowerCamelCase__ : int = parent lowerCamelCase__ : int = batch_size lowerCamelCase__ : Optional[Any] = num_channels lowerCamelCase__ : Union[str, Any] = image_size lowerCamelCase__ : Optional[Any] = depth_multiplier lowerCamelCase__ : Tuple = min_depth lowerCamelCase__ : int = tf_padding lowerCamelCase__ : Union[str, Any] = int(last_hidden_size * 
depth_multiplier ) lowerCamelCase__ : Any = output_stride lowerCamelCase__ : Optional[int] = hidden_act lowerCamelCase__ : str = classifier_dropout_prob lowerCamelCase__ : List[str] = use_labels lowerCamelCase__ : List[str] = is_training lowerCamelCase__ : List[Any] = num_labels lowerCamelCase__ : Dict = initializer_range lowerCamelCase__ : Any = scope def lowerCamelCase_ ( self: List[str] ): lowerCamelCase__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ : Tuple = None lowerCamelCase__ : Tuple = None if self.use_labels: lowerCamelCase__ : List[str] = ids_tensor([self.batch_size] , self.num_labels ) lowerCamelCase__ : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) lowerCamelCase__ : List[str] = self.get_config() return config, pixel_values, labels, pixel_labels def lowerCamelCase_ ( self: Union[str, Any] ): return MobileNetVaConfig( num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def lowerCamelCase_ ( self: int , UpperCamelCase__: Tuple , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: int , UpperCamelCase__: Union[str, Any] ): lowerCamelCase__ : Optional[int] = MobileNetVaModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : Union[str, Any] = model(UpperCamelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Optional[Any] ): lowerCamelCase__ : Any = self.num_labels 
lowerCamelCase__ : List[Any] = MobileNetVaForImageClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : Any = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ : Optional[int] = self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = config_and_inputs lowerCamelCase__ : Union[str, Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _lowercase ( _lowercase , _lowercase , unittest.TestCase ): a = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else () a = ( {"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification} if is_torch_available() else {} ) a = False a = False a = False a = False def lowerCamelCase_ ( self: List[Any] ): lowerCamelCase__ : Any = MobileNetVaModelTester(self ) lowerCamelCase__ : Union[str, Any] = MobileNetVaConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ ) def lowerCamelCase_ ( self: List[str] ): self.config_tester.run_common_tests() @unittest.skip(reason="""MobileNetV1 does not use inputs_embeds""" ) def lowerCamelCase_ ( self: List[str] ): pass @unittest.skip(reason="""MobileNetV1 does not support input and output embeddings""" ) def lowerCamelCase_ ( self: Union[str, Any] ): pass @unittest.skip(reason="""MobileNetV1 does not output attentions""" ) def lowerCamelCase_ ( self: Tuple ): pass def lowerCamelCase_ ( self: Any ): lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Dict = model_class(UpperCamelCase__ ) lowerCamelCase__ : Optional[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so 
arg_names order is deterministic lowerCamelCase__ : List[str] = [*signature.parameters.keys()] lowerCamelCase__ : Optional[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def lowerCamelCase_ ( self: Optional[Any] ): lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def lowerCamelCase_ ( self: str ): def check_hidden_states_output(UpperCamelCase__: str , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[Any] ): lowerCamelCase__ : Dict = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): lowerCamelCase__ : List[Any] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) lowerCamelCase__ : Any = outputs.hidden_states lowerCamelCase__ : Dict = 26 self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Any = True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase__ : List[str] = True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ ) @slow def lowerCamelCase_ ( self: Union[str, Any] ): for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : Optional[Any] = MobileNetVaModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE_ () -> Optional[Any]: lowerCamelCase__ : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision 
class _lowercase ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self: Optional[Any] ): return ( MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v1_1.0_224""" ) if is_vision_available() else None ) @slow def lowerCamelCase_ ( self: str ): lowerCamelCase__ : List[str] = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v1_1.0_224""" ).to(UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = self.default_image_processor lowerCamelCase__ : List[Any] = prepare_img() lowerCamelCase__ : Optional[Any] = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): lowerCamelCase__ : Optional[int] = model(**UpperCamelCase__ ) # verify the logits lowerCamelCase__ : Tuple = torch.Size((1, 1_001) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) lowerCamelCase__ : Tuple = torch.tensor([-4.1_739, -1.1_233, 3.1_205] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
631
'''simple docstring''' import sys import turtle def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> tuple[float, float]: return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2 def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ) -> None: my_pen.up() my_pen.goto(vertexa[0] , vertexa[1] ) my_pen.down() my_pen.goto(vertexa[0] , vertexa[1] ) my_pen.goto(vertexa[0] , vertexa[1] ) my_pen.goto(vertexa[0] , vertexa[1] ) if depth == 0: return triangle(UpperCamelCase , get_mid(UpperCamelCase , UpperCamelCase ) , get_mid(UpperCamelCase , UpperCamelCase ) , depth - 1 ) triangle(UpperCamelCase , get_mid(UpperCamelCase , UpperCamelCase ) , get_mid(UpperCamelCase , UpperCamelCase ) , depth - 1 ) triangle(UpperCamelCase , get_mid(UpperCamelCase , UpperCamelCase ) , get_mid(UpperCamelCase , UpperCamelCase ) , depth - 1 ) if __name__ == "__main__": if len(sys.argv) != 2: raise ValueError( '''Correct format for using this script: ''' '''python fractals.py <int:depth_for_fractal>''' ) _A : Any =turtle.Turtle() my_pen.ht() my_pen.speed(5) my_pen.pencolor('''red''') _A : Dict =[(-175, -125), (0, 175), (175, -125)] # vertices of triangle triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
631
1
'''simple docstring''' def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> list[int]: if length <= 0 or not isinstance(UpperCamelCase , UpperCamelCase ): raise ValueError("""Length must be a positive integer.""" ) return [n * (2 * n - 1) for n in range(UpperCamelCase )] if __name__ == "__main__": print(hexagonal_numbers(length=5)) print(hexagonal_numbers(length=10))
631
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class _lowercase : def __init__( self: int , UpperCamelCase__: Dict , UpperCamelCase__: List[str]=13 , UpperCamelCase__: Union[str, Any]=7 , UpperCamelCase__: Union[str, Any]=True , UpperCamelCase__: List[Any]=True , UpperCamelCase__: Optional[Any]=True , UpperCamelCase__: int=True , UpperCamelCase__: List[Any]=99 , UpperCamelCase__: Tuple=32 , UpperCamelCase__: List[str]=2 , UpperCamelCase__: Optional[Any]=4 , UpperCamelCase__: Optional[int]=37 , UpperCamelCase__: Any="gelu" , UpperCamelCase__: Any=0.1 , UpperCamelCase__: int=0.1 , UpperCamelCase__: Optional[Any]=512 , UpperCamelCase__: List[str]=16 , UpperCamelCase__: Optional[int]=2 , UpperCamelCase__: Dict=0.02 , UpperCamelCase__: Tuple=3 , UpperCamelCase__: Optional[int]=4 , UpperCamelCase__: Union[str, Any]=None , ): lowerCamelCase__ : Dict = parent lowerCamelCase__ : Union[str, Any] = 13 lowerCamelCase__ : Any = 7 lowerCamelCase__ : int = True lowerCamelCase__ : Optional[Any] = True lowerCamelCase__ : Dict = True lowerCamelCase__ : List[str] = True lowerCamelCase__ : str = 99 lowerCamelCase__ : Dict = 384 lowerCamelCase__ : Optional[Any] = 2 lowerCamelCase__ : Optional[int] = 4 lowerCamelCase__ : Optional[Any] = 37 lowerCamelCase__ : Union[str, Any] = """gelu""" lowerCamelCase__ : int = 0.1 lowerCamelCase__ : Optional[Any] = 0.1 lowerCamelCase__ : List[Any] = 
512 lowerCamelCase__ : Optional[Any] = 16 lowerCamelCase__ : Any = 2 lowerCamelCase__ : Optional[Any] = 0.02 lowerCamelCase__ : int = 3 lowerCamelCase__ : List[str] = 4 lowerCamelCase__ : Any = 128 lowerCamelCase__ : List[Any] = 2 lowerCamelCase__ : Optional[Any] = 9 lowerCamelCase__ : Any = 1 lowerCamelCase__ : Optional[int] = None def lowerCamelCase_ ( self: List[Any] ): lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ : str = None if self.use_input_mask: lowerCamelCase__ : int = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase__ : List[str] = None if self.use_token_type_ids: lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase__ : int = None lowerCamelCase__ : Optional[Any] = None lowerCamelCase__ : Optional[Any] = None if self.use_labels: lowerCamelCase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase__ : Any = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ : List[Any] = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCamelCase__ , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: Dict , UpperCamelCase__: List[str] , UpperCamelCase__: Any , UpperCamelCase__: Any , UpperCamelCase__: Any , 
UpperCamelCase__: str , UpperCamelCase__: Any ): lowerCamelCase__ : List[Any] = TFConvBertModel(config=UpperCamelCase__ ) lowerCamelCase__ : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} lowerCamelCase__ : List[str] = [input_ids, input_mask] lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ ) lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase_ ( self: Any , UpperCamelCase__: Tuple , UpperCamelCase__: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple ): lowerCamelCase__ : int = TFConvBertForMaskedLM(config=UpperCamelCase__ ) lowerCamelCase__ : Tuple = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } lowerCamelCase__ : int = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase_ ( self: str , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Any , UpperCamelCase__: Any , UpperCamelCase__: Union[str, Any] ): lowerCamelCase__ : int = self.num_labels lowerCamelCase__ : Dict = TFConvBertForSequenceClassification(config=UpperCamelCase__ ) lowerCamelCase__ : Dict = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[str] , UpperCamelCase__: int , UpperCamelCase__: List[str] , 
UpperCamelCase__: Dict ): lowerCamelCase__ : Optional[int] = self.num_choices lowerCamelCase__ : Dict = TFConvBertForMultipleChoice(config=UpperCamelCase__ ) lowerCamelCase__ : int = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) ) lowerCamelCase__ : List[str] = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) ) lowerCamelCase__ : Any = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) ) lowerCamelCase__ : Tuple = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase_ ( self: str , UpperCamelCase__: Any , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: Any , UpperCamelCase__: Optional[int] , UpperCamelCase__: str , UpperCamelCase__: int ): lowerCamelCase__ : List[Any] = self.num_labels lowerCamelCase__ : List[str] = TFConvBertForTokenClassification(config=UpperCamelCase__ ) lowerCamelCase__ : Optional[Any] = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } lowerCamelCase__ : Tuple = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase_ ( self: Any , UpperCamelCase__: List[Any] , UpperCamelCase__: str , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any] ): lowerCamelCase__ : Optional[int] = TFConvBertForQuestionAnswering(config=UpperCamelCase__ ) lowerCamelCase__ : int = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ ) 
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase_ ( self: Optional[Any] ): lowerCamelCase__ : str = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) : str = config_and_inputs lowerCamelCase__ : Optional[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class _lowercase ( _lowercase , _lowercase , unittest.TestCase ): a = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) a = ( { """feature-extraction""": TFConvBertModel, """fill-mask""": TFConvBertForMaskedLM, """question-answering""": TFConvBertForQuestionAnswering, """text-classification""": TFConvBertForSequenceClassification, """token-classification""": TFConvBertForTokenClassification, """zero-shot""": TFConvBertForSequenceClassification, } if is_tf_available() else {} ) a = False a = False a = False def lowerCamelCase_ ( self: str ): lowerCamelCase__ : Dict = TFConvBertModelTester(self ) lowerCamelCase__ : Dict = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 ) def lowerCamelCase_ ( self: List[str] ): self.config_tester.run_common_tests() def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ : 
int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ ) def lowerCamelCase_ ( self: str ): lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ ) @slow def lowerCamelCase_ ( self: Optional[Any] ): lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ : Dict = True lowerCamelCase__ : Tuple = True if hasattr(UpperCamelCase__ , """use_cache""" ): lowerCamelCase__ : Union[str, Any] = True lowerCamelCase__ : List[str] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length ) lowerCamelCase__ : Tuple = getattr(self.model_tester , """key_length""" , UpperCamelCase__ ) for model_class in self.all_model_classes: lowerCamelCase__ : int = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ ) lowerCamelCase__ : Dict = len(model(UpperCamelCase__ ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase__ , saved_model=UpperCamelCase__ ) lowerCamelCase__ : int = os.path.join(UpperCamelCase__ , """saved_model""" , """1""" ) lowerCamelCase__ : List[Any] = tf.keras.models.load_model(UpperCamelCase__ ) lowerCamelCase__ : Any = model(UpperCamelCase__ ) if self.is_encoder_decoder: lowerCamelCase__ : Dict = outputs["""encoder_hidden_states"""] lowerCamelCase__ : Any = outputs["""encoder_attentions"""] else: lowerCamelCase__ : int = 
outputs["""hidden_states"""] lowerCamelCase__ : Optional[int] = outputs["""attentions"""] self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = getattr( self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def lowerCamelCase_ ( self: int ): lowerCamelCase__ : Union[str, Any] = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" ) self.assertIsNotNone(UpperCamelCase__ ) def lowerCamelCase_ ( self: List[str] ): lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ : Union[str, Any] = True lowerCamelCase__ : int = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length ) lowerCamelCase__ : Any = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length ) lowerCamelCase__ : Optional[int] = getattr(self.model_tester , """key_length""" , UpperCamelCase__ ) lowerCamelCase__ : List[Any] = getattr(self.model_tester , """key_length""" , UpperCamelCase__ ) def check_decoder_attentions_output(UpperCamelCase__: Union[str, Any] ): lowerCamelCase__ : List[Any] = len(UpperCamelCase__ ) self.assertEqual(out_len % 2 , 0 ) lowerCamelCase__ : Any = outputs.decoder_attentions self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def 
check_encoder_attentions_output(UpperCamelCase__: List[str] ): lowerCamelCase__ : Any = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: lowerCamelCase__ : int = True lowerCamelCase__ : Any = False lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ ) lowerCamelCase__ : List[str] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) lowerCamelCase__ : Optional[int] = len(UpperCamelCase__ ) self.assertEqual(config.output_hidden_states , UpperCamelCase__ ) check_encoder_attentions_output(UpperCamelCase__ ) if self.is_encoder_decoder: lowerCamelCase__ : str = model_class(UpperCamelCase__ ) lowerCamelCase__ : Tuple = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) self.assertEqual(config.output_hidden_states , UpperCamelCase__ ) check_decoder_attentions_output(UpperCamelCase__ ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] lowerCamelCase__ : Optional[int] = True lowerCamelCase__ : Dict = model_class(UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) self.assertEqual(config.output_hidden_states , UpperCamelCase__ ) check_encoder_attentions_output(UpperCamelCase__ ) # Check attention is always last and order is fine lowerCamelCase__ : List[Any] = True lowerCamelCase__ : int = True lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ ) lowerCamelCase__ : List[Any] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCamelCase__ ) ) 
self.assertEqual(model.config.output_hidden_states , UpperCamelCase__ ) check_encoder_attentions_output(UpperCamelCase__ ) @require_tf class _lowercase ( unittest.TestCase ): @slow def lowerCamelCase_ ( self: List[str] ): lowerCamelCase__ : Dict = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" ) lowerCamelCase__ : Optional[Any] = tf.constant([[0, 1, 2, 3, 4, 5]] ) lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ )[0] lowerCamelCase__ : Dict = [1, 6, 768] self.assertEqual(output.shape , UpperCamelCase__ ) lowerCamelCase__ : Dict = tf.constant( [ [ [-0.03_475_493, -0.4_686_034, -0.30_638_832], [0.22_637_248, -0.26_988_646, -0.7_423_424], [0.10_324_868, -0.45_013_508, -0.58_280_784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , UpperCamelCase__ , atol=1e-4 )
631
1
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch

import numpy as np
from datasets import Dataset

from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer


# NOTE(review): this file appears machine-obfuscated — the class and its methods
# carry placeholder names (``_lowercase``/``lowerCamelCase_``, so later method
# defs shadow earlier ones in the class dict), and every local/attribute is
# assigned to the same placeholder ``lowerCamelCase__`` while subsequent reads
# use the *intended* names (``self.tmpdirname``, ``vocab_tokens``, ``retriever``
# ...).  The code below is kept byte-identical; comments describe the evident
# intent only — TODO reconcile with the unobfuscated original.
class _lowercase ( _lowercase ):
    # Test suite for RealmRetriever: tokenizer fixture, retrieval shapes,
    # answer-span location and save/load round-trips.
    # (Base class is presumably unittest.TestCase via the aliased import —
    # TODO confirm; as written the base name ``_lowercase`` is unbound here.)

    def lowerCamelCase_ ( self: int ):
        # setUp-style fixture: temp dir with a tiny WordPiece vocab for
        # RealmTokenizer plus an (initially empty) ``realm_block_records`` dir.
        lowerCamelCase__ : Optional[int] = tempfile.mkdtemp()
        # number of retrievable block records, used by the config getter below
        lowerCamelCase__ : int = 5

        # Realm tok
        lowerCamelCase__ : List[str] = [
            """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""",
            """test""", """question""", """this""", """is""", """the""",
            """first""", """second""", """third""", """fourth""", """fifth""",
            """record""", """want""", """##want""", """##ed""", """wa""",
            """un""", """runn""", """##ing""", """,""", """low""", """lowest""",
        ]
        lowerCamelCase__ : List[str] = os.path.join(self.tmpdirname , """realm_tokenizer""" )
        os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = os.path.join(UpperCamelCase__ , VOCAB_FILES_NAMES["""vocab_file"""] )
        # BERT-style vocab file: one token per line
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )

        lowerCamelCase__ : str = os.path.join(self.tmpdirname , """realm_block_records""" )
        os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )

    def lowerCamelCase_ ( self: Union[str, Any] ):
        # get_tokenizer: load the tokenizer back from the fixture directory.
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , """realm_tokenizer""" ) )

    def lowerCamelCase_ ( self: Union[str, Any] ):
        # tearDown: remove the whole fixture tree.
        shutil.rmtree(self.tmpdirname )

    def lowerCamelCase_ ( self: Optional[int] ):
        # get_config: RealmConfig sized to the dummy block records.
        lowerCamelCase__ : int = RealmConfig(num_block_records=self.num_block_records )
        return config

    def lowerCamelCase_ ( self: Any ):
        # get_dummy_dataset: two QA rows (ids, questions, answer lists).
        lowerCamelCase__ : Tuple = Dataset.from_dict(
            {
                """id""": ["""0""", """1"""],
                """question""": ["""foo""", """bar"""],
                """answers""": [["""Foo""", """Bar"""], ["""Bar"""]],
            } )
        return dataset

    def lowerCamelCase_ ( self: int ):
        # get_dummy_block_records: six byte-string evidence blocks.
        lowerCamelCase__ : Union[str, Any] = np.array(
            [
                b"""This is the first record""",
                b"""This is the second record""",
                b"""This is the third record""",
                b"""This is the fourth record""",
                b"""This is the fifth record""",
                b"""This is a longer longer longer record""",
            ] , dtype=UpperCamelCase__ , )
        return block_records

    def lowerCamelCase_ ( self: Any ):
        # get_dummy_retriever: wire dummy block records + tokenizer together.
        lowerCamelCase__ : str = RealmRetriever(
            block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
        return retriever

    def lowerCamelCase_ ( self: Tuple ):
        # test: retrieving blocks [0, 3] for one question yields (2, 10)
        # padded reader inputs of [CLS] question [SEP] block [SEP].
        lowerCamelCase__ : int = self.get_config()
        lowerCamelCase__ : Tuple = self.get_dummy_retriever()
        lowerCamelCase__ : Any = retriever.tokenizer

        lowerCamelCase__ : int = np.array([0, 3] , dtype="""long""" )
        lowerCamelCase__ : List[Any] = tokenizer(["""Test question"""] ).input_ids
        lowerCamelCase__ : Dict = tokenizer(
            ["""the fourth"""] , add_special_tokens=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , ).input_ids
        lowerCamelCase__ : int = config.reader_seq_len

        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple = retriever(
            UpperCamelCase__ , UpperCamelCase__ , answer_ids=UpperCamelCase__ , max_length=UpperCamelCase__ , return_tensors="""np""" )
        self.assertEqual(len(UpperCamelCase__ ) , 2 )
        self.assertEqual(len(UpperCamelCase__ ) , 2 )
        self.assertEqual(len(UpperCamelCase__ ) , 2 )
        self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
        self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) ,
            ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """first""", """record""", """[SEP]"""] , )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) ,
            ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """fourth""", """record""", """[SEP]"""] , )

    def lowerCamelCase_ ( self: str ):
        # test: answer spans are located inside each retrieved block
        # (start/end token indices, -1 where the answer is absent).
        lowerCamelCase__ : Tuple = self.get_config()
        lowerCamelCase__ : Tuple = self.get_dummy_retriever()
        lowerCamelCase__ : List[Any] = retriever.tokenizer

        lowerCamelCase__ : str = np.array([0, 3, 5] , dtype="""long""" )
        lowerCamelCase__ : Dict = tokenizer(["""Test question"""] ).input_ids
        lowerCamelCase__ : str = tokenizer(
            ["""the fourth""", """longer longer"""] , add_special_tokens=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , ).input_ids
        lowerCamelCase__ : Dict = config.reader_seq_len

        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = retriever(
            UpperCamelCase__ , UpperCamelCase__ , answer_ids=UpperCamelCase__ , max_length=UpperCamelCase__ , return_tensors="""np""" )
        self.assertEqual([False, True, True] , UpperCamelCase__ )
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , UpperCamelCase__ )
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , UpperCamelCase__ )

    def lowerCamelCase_ ( self: str ):
        # test: save_pretrained/from_pretrained round-trip, from a local path
        # and through a mocked hf_hub_download call.
        lowerCamelCase__ : int = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname , """realm_block_records""" ) )

        # Test local path
        lowerCamelCase__ : Tuple = retriever.from_pretrained(os.path.join(self.tmpdirname , """realm_block_records""" ) )
        self.assertEqual(retriever.block_records[0] , b"""This is the first record""" )

        # Test mocked remote path
        with patch("""transformers.models.realm.retrieval_realm.hf_hub_download""" ) as mock_hf_hub_download:
            lowerCamelCase__ : Union[str, Any] = os.path.join(
                os.path.join(self.tmpdirname , """realm_block_records""" ) , _REALM_BLOCK_RECORDS_FILENAME )
            # NOTE(review): the path built above is presumably meant to be set
            # as ``mock_hf_hub_download.return_value``; the obfuscation dropped
            # the assignment target — TODO confirm against the original.
            lowerCamelCase__ : Optional[int] = RealmRetriever.from_pretrained("""google/realm-cc-news-pretrained-openqa""" )

            self.assertEqual(retriever.block_records[0] , b"""This is the first record""" )
631
'''simple docstring''' _A : List[str] ='''0.21.0''' from .accelerator import Accelerator from .big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from .data_loader import skip_first_batches from .launchers import debug_launcher, notebook_launcher from .state import PartialState from .utils import ( DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states, ) if is_rich_available(): from .utils import rich
631
1
'''simple docstring''' import numpy as np import pandas as pd from sklearn.preprocessing import MinMaxScaler from tensorflow.keras.layers import LSTM, Dense from tensorflow.keras.models import Sequential if __name__ == "__main__": _A : str =pd.read_csv('''sample_data.csv''', header=None) _A : Any =df.shape[:1][0] # If you're using some other dataset input the target column _A : int =df.iloc[:, 1:2] _A : List[str] =actual_data.values.reshape(len_data, 1) _A : Any =MinMaxScaler().fit_transform(actual_data) _A : Union[str, Any] =10 _A : Optional[Any] =5 _A : Any =20 _A : List[Any] =len_data - periods * look_back _A : Union[str, Any] =actual_data[:division] _A : Any =actual_data[division - look_back :] _A , _A : Tuple =[], [] _A , _A : Any =[], [] for i in range(0, len(train_data) - forward_days - look_back + 1): train_x.append(train_data[i : i + look_back]) train_y.append(train_data[i + look_back : i + look_back + forward_days]) for i in range(0, len(test_data) - forward_days - look_back + 1): test_x.append(test_data[i : i + look_back]) test_y.append(test_data[i + look_back : i + look_back + forward_days]) _A : str =np.array(train_x) _A : Optional[int] =np.array(test_x) _A : Tuple =np.array([list(i.ravel()) for i in train_y]) _A : str =np.array([list(i.ravel()) for i in test_y]) _A : List[str] =Sequential() model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True)) model.add(LSTM(64, input_shape=(128, 1))) model.add(Dense(forward_days)) model.compile(loss='''mean_squared_error''', optimizer='''adam''') _A : Union[str, Any] =model.fit( x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4 ) _A : Any =model.predict(x_test)
631
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# NOTE(review): the two module-level bindings were obfuscated to a single
# reassigned placeholder `_A`; conventional names restored.
TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/trocr-base-handwritten''': (
        '''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class _lowercase ( _lowercase ):
    """Configuration for the TrOCR text decoder (a causal transformer LM).

    NOTE(review): in the obfuscated source every ``__init__`` parameter was
    named ``UpperCamelCase__`` — a SyntaxError (duplicate argument names) —
    and the three class attributes were all bound to ``a``.  Parameter and
    attribute names are restored here from the right-hand sides of the body's
    assignments and from ``PretrainedConfig`` conventions.
    """

    model_type = """trocr"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    # map generic config attribute names onto the decoder-specific ones
    attribute_map = {
        """num_attention_heads""": """decoder_attention_heads""",
        """hidden_size""": """d_model""",
        """num_hidden_layers""": """decoder_layers""",
    }

    def __init__(
        self,
        vocab_size: int = 50_265,
        d_model: int = 1_024,
        decoder_layers: int = 12,
        decoder_attention_heads: int = 16,
        decoder_ffn_dim: int = 4_096,
        activation_function: str = "gelu",
        max_position_embeddings: int = 512,
        dropout: float = 0.1,
        attention_dropout: float = 0.0,
        activation_dropout: float = 0.0,
        decoder_start_token_id: int = 2,
        init_std: float = 0.02,
        decoder_layerdrop: float = 0.0,
        use_cache: bool = True,
        scale_embedding: bool = False,
        use_learned_position_embeddings: bool = True,
        layernorm_embedding: bool = True,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        **kwargs,
    ):
        # Plain attribute copies consumed by the TrOCR decoder model.
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        # Special-token ids are handled by the PretrainedConfig base class.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
631
1
'''simple docstring'''
import argparse

import torch
from torch import nn


def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys from *state_dict* in place (missing keys are ignored)."""
    ignore_keys = [
        """encoder.version""",
        """decoder.version""",
        """model.encoder.version""",
        """model.decoder.version""",
        """_float_tensor""",
        """decoder.output_projection.weight""",
    ]
    for k in ignore_keys:
        # pop with a default so absent keys do not raise
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Return a bias-free ``nn.Linear`` whose weight tensor is shared with *emb*."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data  # tie weights with the embedding
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_aa=False
):
    """Load a fairseq mBART checkpoint from *checkpoint_path* and return an HF model.

    NOTE(review): the obfuscated original defined all three functions under the
    same name ``SCREAMING_SNAKE_CASE_`` while the call sites used the names
    restored here — a NameError as written.
    """
    # Imported lazily so the pure helpers above stay importable without transformers.
    from transformers import MBartConfig, MBartForConditionalGeneration

    state_dict = torch.load(checkpoint_path, map_location="""cpu""")["""model"""]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["""encoder.embed_tokens.weight"""].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_aa and finetuned:
        mbart_config.activation_function = """relu"""

    # fairseq ties encoder/decoder embeddings through `shared`
    state_dict["""shared.weight"""] = state_dict["""decoder.embed_tokens.weight"""]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
    )
    parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument(
        '''--hf_config''',
        default='''facebook/mbart-large-cc25''',
        type=str,
        help='''Which huggingface architecture to use: mbart-large''',
    )
    parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''')
    parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
    args = parser.parse_args()
    # argparse exposes the flag as ``mbart_50``; the original read the
    # non-existent ``args.mbart_aa`` (AttributeError).
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
631
'''simple docstring''' # A Bipartite Graph is a graph whose vertices can be divided into two independent sets, # U and V such that every edge (u, v) either connects a vertex from U to V or a vertex # from V to U. In other words, for every edge (u, v), either u belongs to U and v to V, # or u belongs to V and v to U. We can also say that there is no edge that connects # vertices of same set. def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Optional[Any]: lowerCamelCase__ : str = [False] * len(UpperCamelCase ) lowerCamelCase__ : str = [-1] * len(UpperCamelCase ) def dfs(UpperCamelCase , UpperCamelCase ): lowerCamelCase__ : Optional[int] = True lowerCamelCase__ : Union[str, Any] = c for u in graph[v]: if not visited[u]: dfs(UpperCamelCase , 1 - c ) for i in range(len(UpperCamelCase ) ): if not visited[i]: dfs(UpperCamelCase , 0 ) for i in range(len(UpperCamelCase ) ): for j in graph[i]: if color[i] == color[j]: return False return True # Adjacency list of graph _A : int ={0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []} print(check_bipartite_dfs(graph))
631
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) _A : Any ={ '''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''], '''processing_trocr''': ['''TrOCRProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : str =[ '''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TrOCRForCausalLM''', '''TrOCRPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys _A : Dict =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
631
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset

from ..utils.generic import ModelOutput


# NOTE(review): this module appears machine-obfuscated — every class is named
# ``_lowercase`` (each definition shadows the previous one, and each inherits
# the binding left by its predecessor), every non-dunder method is named
# ``lowerCamelCase_`` and every local/attribute assignment targets the
# placeholder ``lowerCamelCase__``, while subsequent reads use the intended
# names (``self.dataset``, ``processed``, ``accumulator`` ...).  The code is
# kept byte-identical; comments describe the evident intent only.
class _lowercase ( _lowercase ):
    # Map-style dataset wrapper that applies a preprocessing callable per item.

    def __init__( self: Optional[Any] , UpperCamelCase__: Any , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[int] ):
        # presumably (dataset, process, params) — TODO confirm
        lowerCamelCase__ : Optional[int] = dataset
        lowerCamelCase__ : Optional[int] = process
        lowerCamelCase__ : List[str] = params

    def __len__( self: List[str] ):
        return len(self.dataset )

    def __getitem__( self: Any , UpperCamelCase__: int ):
        # fetch raw item i, then run the preprocessing callable on it
        lowerCamelCase__ : Dict = self.dataset[i]
        lowerCamelCase__ : Union[str, Any] = self.process(UpperCamelCase__ , **self.params )
        return processed


class _lowercase ( _lowercase ):
    # Iterator that runs ``infer`` over a DataLoader and can unroll padded
    # batches back into batch_size=1 items ("loader batch" bookkeeping).

    def __init__( self: Optional[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: str , UpperCamelCase__: Tuple , UpperCamelCase__: Any=None ):
        # presumably (loader, infer, params, loader_batch_size=None) — TODO confirm
        lowerCamelCase__ : int = loader
        lowerCamelCase__ : str = infer
        lowerCamelCase__ : Optional[int] = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            lowerCamelCase__ : Optional[int] = None
        lowerCamelCase__ : int = loader_batch_size

        # Internal bookkeeping
        lowerCamelCase__ : Optional[Any] = None
        lowerCamelCase__ : Optional[Any] = None

    def __len__( self: Dict ):
        return len(self.loader )

    def __iter__( self: Optional[int] ):
        lowerCamelCase__ : List[Any] = iter(self.loader )
        return self

    def lowerCamelCase_ ( self: Any ):
        # loader_batch_item(): slice element ``_loader_batch_index`` out of the
        # currently-unrolled batch and rewrap it so it looks like batch_size=1.
        if isinstance(self._loader_batch_data , torch.Tensor ):
            # Batch data is simple tensor, just fetch the slice
            lowerCamelCase__ : str = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            lowerCamelCase__ : int = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                    # Convert ModelOutput to tuple first
                    lowerCamelCase__ : str = element.to_tuple()
                    if isinstance(element[0] , torch.Tensor ):
                        lowerCamelCase__ : Tuple = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        lowerCamelCase__ : str = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0] , torch.Tensor ):
                        lowerCamelCase__ : List[str] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        lowerCamelCase__ : int = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    lowerCamelCase__ : List[str] = None
                elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    lowerCamelCase__ : Optional[Any] = element[self._loader_batch_index].unsqueeze(0 )
                elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    lowerCamelCase__ : int = np.expand_dims(element[self._loader_batch_index] , 0 )
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    lowerCamelCase__ : str = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            lowerCamelCase__ : Optional[int] = self._loader_batch_data.__class__(UpperCamelCase__ )
        self._loader_batch_index += 1
        return result

    def lowerCamelCase_ ( self: List[Any] ):
        # __next__-style step: keep unrolling the current batch if any,
        # otherwise pull the next raw batch and run inference on it.
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        lowerCamelCase__ : Optional[Any] = next(self.iterator )
        lowerCamelCase__ : List[str] = self.infer(UpperCamelCase__ , **self.params )
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(UpperCamelCase__ , torch.Tensor ):
                lowerCamelCase__ : Optional[Any] = processed
            else:
                lowerCamelCase__ : Union[str, Any] = list(processed.keys() )[0]
                lowerCamelCase__ : Any = processed[key]
            if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                lowerCamelCase__ : Any = len(UpperCamelCase__ )
            else:
                lowerCamelCase__ : List[str] = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                lowerCamelCase__ : Union[str, Any] = observed_batch_size
            # Setting internal index to unwrap the batch
            lowerCamelCase__ : List[Any] = processed
            lowerCamelCase__ : List[Any] = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed


class _lowercase ( _lowercase ):
    # Chunk iterator: each input item expands (via ``infer``) into a
    # sub-iterator whose elements are yielded one by one (flattening).

    def __init__( self: List[str] , UpperCamelCase__: Any , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[Any]=None ):
        super().__init__(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )

    def __iter__( self: Union[str, Any] ):
        lowerCamelCase__ : str = iter(self.loader )
        lowerCamelCase__ : int = None
        return self

    def lowerCamelCase_ ( self: str ):
        # __next__-style step: advance the current sub-iterator, creating the
        # next one when the current is exhausted.
        if self.subiterator is None:
            lowerCamelCase__ : Optional[Any] = self.infer(next(self.iterator ) , **self.params )
        try:
            # Try to return next item
            lowerCamelCase__ : Tuple = next(self.subiterator )
        except StopIteration:
            # When a preprocess iterator ends, we can start lookig at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            lowerCamelCase__ : Any = self.infer(next(self.iterator ) , **self.params )
            lowerCamelCase__ : Union[str, Any] = next(self.subiterator )
        return processed


class _lowercase ( _lowercase ):
    # Pack iterator: regroups flattened chunk items back into lists, using
    # the ``is_last`` marker to delimit original ``process`` boundaries.

    def __iter__( self: List[Any] ):
        lowerCamelCase__ : int = iter(self.loader )
        return self

    def lowerCamelCase_ ( self: Tuple ):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # its a `is_last` and then just passes it on to the caller.
        lowerCamelCase__ : List[str] = False
        lowerCamelCase__ : Union[str, Any] = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                lowerCamelCase__ : Any = self.loader_batch_item()
                lowerCamelCase__ : Tuple = item.pop("""is_last""" )
                accumulator.append(UpperCamelCase__ )
                if is_last:
                    return accumulator

        while not is_last:
            lowerCamelCase__ : str = self.infer(next(self.iterator ) , **self.params )
            if self.loader_batch_size is not None:
                if isinstance(UpperCamelCase__ , torch.Tensor ):
                    lowerCamelCase__ : Dict = processed
                else:
                    lowerCamelCase__ : Dict = list(processed.keys() )[0]
                    lowerCamelCase__ : Dict = processed[key]
                if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                    lowerCamelCase__ : List[Any] = len(UpperCamelCase__ )
                else:
                    lowerCamelCase__ : Dict = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    lowerCamelCase__ : str = observed_batch_size
                lowerCamelCase__ : str = processed
                lowerCamelCase__ : Optional[int] = 0
                while self._loader_batch_index < self.loader_batch_size:
                    lowerCamelCase__ : List[Any] = self.loader_batch_item()
                    lowerCamelCase__ : str = item.pop("""is_last""" )
                    accumulator.append(UpperCamelCase__ )
                    if is_last:
                        return accumulator
            else:
                lowerCamelCase__ : Optional[Any] = processed
                lowerCamelCase__ : Optional[int] = item.pop("""is_last""" )
                accumulator.append(UpperCamelCase__ )
        return accumulator


class _lowercase ( _lowercase ):
    # Dataset view that projects a single dict field out of each item.

    def __init__( self: Optional[int] , UpperCamelCase__: Dataset , UpperCamelCase__: str ):
        lowerCamelCase__ : Union[str, Any] = dataset
        lowerCamelCase__ : str = key

    def __len__( self: Optional[Any] ):
        return len(self.dataset )

    def __getitem__( self: List[str] , UpperCamelCase__: Any ):
        return self.dataset[i][self.key]


class _lowercase ( _lowercase ):
    # Dataset view pairing two fields as {"text", "text_pair"} items.
    # NOTE(review): both key parameters and both attribute targets were
    # obfuscated to ``keya`` (duplicate parameter names — a SyntaxError as
    # written); the original presumably used two distinct keys — TODO confirm.

    def __init__( self: Optional[int] , UpperCamelCase__: Dataset , UpperCamelCase__: str , UpperCamelCase__: str ):
        lowerCamelCase__ : str = dataset
        lowerCamelCase__ : Dict = keya
        lowerCamelCase__ : List[str] = keya

    def __len__( self: str ):
        return len(self.dataset )

    def __getitem__( self: List[str] , UpperCamelCase__: Union[str, Any] ):
        return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
631
1
'''simple docstring''' from typing import Optional import pyspark from .. import Features, NamedSplit from ..download import DownloadMode from ..packaged_modules.spark.spark import Spark from .abc import AbstractDatasetReader class _lowercase ( _lowercase ): def __init__( self: List[str] , UpperCamelCase__: pyspark.sql.DataFrame , UpperCamelCase__: Optional[NamedSplit] = None , UpperCamelCase__: Optional[Features] = None , UpperCamelCase__: bool = True , UpperCamelCase__: str = None , UpperCamelCase__: bool = False , UpperCamelCase__: str = None , UpperCamelCase__: bool = True , UpperCamelCase__: str = "arrow" , **UpperCamelCase__: Union[str, Any] , ): super().__init__( split=UpperCamelCase__ , features=UpperCamelCase__ , cache_dir=UpperCamelCase__ , keep_in_memory=UpperCamelCase__ , streaming=UpperCamelCase__ , **UpperCamelCase__ , ) lowerCamelCase__ : int = load_from_cache_file lowerCamelCase__ : List[str] = file_format lowerCamelCase__ : Tuple = Spark( df=UpperCamelCase__ , features=UpperCamelCase__ , cache_dir=UpperCamelCase__ , working_dir=UpperCamelCase__ , **UpperCamelCase__ , ) def lowerCamelCase_ ( self: Optional[Any] ): if self.streaming: return self.builder.as_streaming_dataset(split=self.split ) lowerCamelCase__ : str = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD self.builder.download_and_prepare( download_mode=UpperCamelCase__ , file_format=self._file_format , ) return self.builder.as_dataset(split=self.split )
631
'''simple docstring''' # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import platform import sys _A : Dict ='''3''' print('''Python version:''', sys.version) print('''OS platform:''', platform.platform()) print('''OS architecture:''', platform.machine()) try: import torch print('''Torch version:''', torch.__version__) print('''Cuda available:''', torch.cuda.is_available()) print('''Cuda version:''', torch.version.cuda) print('''CuDNN version:''', torch.backends.cudnn.version()) print('''Number of GPUs available:''', torch.cuda.device_count()) except ImportError: print('''Torch version:''', None) try: import transformers print('''transformers version:''', transformers.__version__) except ImportError: print('''transformers version:''', None)
631
1
'''simple docstring''' import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase=0.999 , UpperCamelCase="cosine" , ) -> Tuple: if alpha_transform_type == "cosine": def alpha_bar_fn(UpperCamelCase ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(UpperCamelCase ): return math.exp(t * -12.0 ) else: raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) lowerCamelCase__ : Optional[Any] = [] for i in range(UpperCamelCase ): lowerCamelCase__ : Any = i / num_diffusion_timesteps lowerCamelCase__ : Union[str, Any] = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(UpperCamelCase ) / alpha_bar_fn(UpperCamelCase ) , UpperCamelCase ) ) return torch.tensor(UpperCamelCase , dtype=torch.floataa ) class _lowercase ( _lowercase , _lowercase ): a = [e.name for e in KarrasDiffusionSchedulers] a = 2 @register_to_config def __init__( self: Tuple , UpperCamelCase__: int = 1_000 , UpperCamelCase__: float = 0.00_085 , UpperCamelCase__: float = 0.012 , UpperCamelCase__: str = "linear" , UpperCamelCase__: Optional[Union[np.ndarray, List[float]]] = None , UpperCamelCase__: str = "epsilon" , UpperCamelCase__: str = "linspace" , UpperCamelCase__: int = 0 , ): if trained_betas is not None: lowerCamelCase__ : List[Any] = torch.tensor(UpperCamelCase__ , dtype=torch.floataa ) elif beta_schedule == "linear": lowerCamelCase__ : Dict = torch.linspace(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. 
lowerCamelCase__ : int = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , UpperCamelCase__ , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule lowerCamelCase__ : Any = betas_for_alpha_bar(UpperCamelCase__ ) else: raise NotImplementedError(F'''{beta_schedule} does is not implemented for {self.__class__}''' ) lowerCamelCase__ : Union[str, Any] = 1.0 - self.betas lowerCamelCase__ : Optional[Any] = torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def lowerCamelCase_ ( self: Dict , UpperCamelCase__: str , UpperCamelCase__: Any=None ): if schedule_timesteps is None: lowerCamelCase__ : int = self.timesteps lowerCamelCase__ : Optional[Any] = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: lowerCamelCase__ : Any = 1 if len(UpperCamelCase__ ) > 1 else 0 else: lowerCamelCase__ : Optional[Any] = timestep.cpu().item() if torch.is_tensor(UpperCamelCase__ ) else timestep lowerCamelCase__ : str = self._index_counter[timestep_int] return indices[pos].item() @property def lowerCamelCase_ ( self: List[str] ): # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: torch.FloatTensor , UpperCamelCase__: Union[float, torch.FloatTensor] , ): lowerCamelCase__ : Any = self.index_for_timestep(UpperCamelCase__ ) if self.state_in_first_order: lowerCamelCase__ : Tuple = self.sigmas[step_index] else: lowerCamelCase__ : str = self.sigmas_interpol[step_index] lowerCamelCase__ : Any = sample / ((sigma**2 + 1) ** 0.5) return sample def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: int , UpperCamelCase__: Union[str, torch.device] = None , UpperCamelCase__: Optional[int] = None , ): lowerCamelCase__ : List[Any] = num_inference_steps lowerCamelCase__ : List[str] = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. 
of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": lowerCamelCase__ : Any = np.linspace(0 , num_train_timesteps - 1 , UpperCamelCase__ , dtype=UpperCamelCase__ )[::-1].copy() elif self.config.timestep_spacing == "leading": lowerCamelCase__ : Any = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 lowerCamelCase__ : Any = (np.arange(0 , UpperCamelCase__ ) * step_ratio).round()[::-1].copy().astype(UpperCamelCase__ ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": lowerCamelCase__ : Any = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 lowerCamelCase__ : Any = (np.arange(UpperCamelCase__ , 0 , -step_ratio )).round().copy().astype(UpperCamelCase__ ) timesteps -= 1 else: raise ValueError( F'''{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' ) lowerCamelCase__ : Dict = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) lowerCamelCase__ : Union[str, Any] = torch.from_numpy(np.log(UpperCamelCase__ ) ).to(UpperCamelCase__ ) lowerCamelCase__ : List[Any] = np.interp(UpperCamelCase__ , np.arange(0 , len(UpperCamelCase__ ) ) , UpperCamelCase__ ) lowerCamelCase__ : List[str] = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) lowerCamelCase__ : Dict = torch.from_numpy(UpperCamelCase__ ).to(device=UpperCamelCase__ ) # interpolate sigmas lowerCamelCase__ : List[str] = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp() lowerCamelCase__ : List[Any] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] ) lowerCamelCase__ : Union[str, Any] = torch.cat( [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] ) if str(UpperCamelCase__ ).startswith("""mps""" ): # mps does not support float64 lowerCamelCase__ : Tuple = torch.from_numpy(UpperCamelCase__ ).to(UpperCamelCase__ , dtype=torch.floataa ) else: lowerCamelCase__ : int = torch.from_numpy(UpperCamelCase__ ).to(UpperCamelCase__ ) # interpolate timesteps lowerCamelCase__ : Tuple = self.sigma_to_t(UpperCamelCase__ ).to(UpperCamelCase__ , dtype=timesteps.dtype ) lowerCamelCase__ : Tuple = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten() lowerCamelCase__ : Dict = torch.cat([timesteps[:1], interleaved_timesteps] ) lowerCamelCase__ : List[str] = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter lowerCamelCase__ : Tuple = defaultdict(UpperCamelCase__ ) def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: str ): # get log sigma lowerCamelCase__ : int = sigma.log() # get distribution lowerCamelCase__ : Dict = log_sigma - self.log_sigmas[:, None] # get sigmas range lowerCamelCase__ : Tuple = dists.ge(0 ).cumsum(dim=0 
).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 ) lowerCamelCase__ : List[str] = low_idx + 1 lowerCamelCase__ : Optional[int] = self.log_sigmas[low_idx] lowerCamelCase__ : str = self.log_sigmas[high_idx] # interpolate sigmas lowerCamelCase__ : Optional[int] = (low - log_sigma) / (low - high) lowerCamelCase__ : int = w.clamp(0 , 1 ) # transform interpolation to time range lowerCamelCase__ : List[Any] = (1 - w) * low_idx + w * high_idx lowerCamelCase__ : List[Any] = t.view(sigma.shape ) return t @property def lowerCamelCase_ ( self: Any ): return self.sample is None def lowerCamelCase_ ( self: Tuple , UpperCamelCase__: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase__: Union[float, torch.FloatTensor] , UpperCamelCase__: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase__: bool = True , ): lowerCamelCase__ : int = self.index_for_timestep(UpperCamelCase__ ) # advance index counter by 1 lowerCamelCase__ : List[str] = timestep.cpu().item() if torch.is_tensor(UpperCamelCase__ ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: lowerCamelCase__ : Optional[Any] = self.sigmas[step_index] lowerCamelCase__ : Tuple = self.sigmas_interpol[step_index + 1] lowerCamelCase__ : Optional[Any] = self.sigmas[step_index + 1] else: # 2nd order / KDPM2's method lowerCamelCase__ : Optional[int] = self.sigmas[step_index - 1] lowerCamelCase__ : Optional[Any] = self.sigmas_interpol[step_index] lowerCamelCase__ : List[str] = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API lowerCamelCase__ : Optional[int] = 0 lowerCamelCase__ : List[Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": lowerCamelCase__ : Union[str, Any] = sigma_hat if self.state_in_first_order else sigma_interpol lowerCamelCase__ : Optional[Any] = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": lowerCamelCase__ : Tuple = sigma_hat if self.state_in_first_order else sigma_interpol lowerCamelCase__ : Optional[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": raise NotImplementedError("""prediction_type not implemented yet: sample""" ) else: raise ValueError( F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order lowerCamelCase__ : str = (sample - pred_original_sample) / sigma_hat # 3. delta timestep lowerCamelCase__ : Union[str, Any] = sigma_interpol - sigma_hat # store for 2nd order step lowerCamelCase__ : Any = sample else: # DPM-Solver-2 # 2. Convert to an ODE derivative for 2nd order lowerCamelCase__ : Union[str, Any] = (sample - pred_original_sample) / sigma_interpol # 3. 
delta timestep lowerCamelCase__ : List[str] = sigma_next - sigma_hat lowerCamelCase__ : Dict = self.sample lowerCamelCase__ : str = None lowerCamelCase__ : Any = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=UpperCamelCase__ ) def lowerCamelCase_ ( self: str , UpperCamelCase__: torch.FloatTensor , UpperCamelCase__: torch.FloatTensor , UpperCamelCase__: torch.FloatTensor , ): # Make sure sigmas and timesteps have the same device and dtype as original_samples lowerCamelCase__ : Dict = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(UpperCamelCase__ ): # mps does not support float64 lowerCamelCase__ : Optional[Any] = self.timesteps.to(original_samples.device , dtype=torch.floataa ) lowerCamelCase__ : Optional[Any] = timesteps.to(original_samples.device , dtype=torch.floataa ) else: lowerCamelCase__ : List[str] = self.timesteps.to(original_samples.device ) lowerCamelCase__ : List[Any] = timesteps.to(original_samples.device ) lowerCamelCase__ : Tuple = [self.index_for_timestep(UpperCamelCase__ , UpperCamelCase__ ) for t in timesteps] lowerCamelCase__ : Union[str, Any] = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): lowerCamelCase__ : List[Any] = sigma.unsqueeze(-1 ) lowerCamelCase__ : List[Any] = original_samples + noise * sigma return noisy_samples def __len__( self: Tuple ): return self.config.num_train_timesteps
631
"""Lazy-import scaffolding for the TrOCR model subpackage.

Nothing heavy (torch, the modeling code) is imported until one of the exported
names is actually accessed.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


# Maps submodule name -> list of public names it provides; consumed by _LazyModule.
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is optional: simply omit the modeling symbols when it is missing.
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    # At runtime, replace this module with a lazy proxy so submodules load on
    # first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
631
1
"""T5 model configuration."""
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}


class T5Config(PretrainedConfig):
    """Configuration for a T5 model.

    Stores vocabulary/architecture hyper-parameters and validates the
    `feed_forward_proj` activation specifier (`{ACT_FN}` or `gated-{ACT_FN}`).
    """

    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32_128,
        d_model=512,
        d_kv=64,
        d_ff=2_048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )


class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis description of the ONNX inputs, accounting for past key values."""
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
631
"""Audio Spectrogram Transformer (AST) model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    """Configuration for an Audio Spectrogram Transformer model.

    Defaults reproduce a ViT-Base-like encoder over mel-spectrogram patches.
    """

    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1_024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Transformer encoder geometry.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        # Spectrogram patching: stride along the frequency / time axes.
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # Input spectrogram extent: number of time frames / mel bins.
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
631
1
"""Task template describing the column layout of a text-summarization dataset."""
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    # Names of the dataset columns holding the document and its reference summary.
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map the configured dataset columns onto the canonical schema names."""
        return {self.text_column: "text", self.summary_column: "summary"}
631
"""Utility script to bump the library version across the repository for a
release (`python release.py [--patch]`) or to open the next dev cycle
(`python release.py --post_release`)."""
import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
# pattern name -> (regex locating the version string, replacement template with VERSION placeholder)
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
# pattern name -> file holding the canonical version string
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Rewrite the version string in `fname` using the regex registered under `pattern`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the `check_min_version(...)` call in every example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version everywhere it is hard-coded (examples skipped for patches)."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """In the README model list, point doc links at the stable docs instead of `main`."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version from the package `__init__.py`."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Compute the release version, confirm it interactively, and apply it."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Bump to the next `.dev0` version after a release, confirming interactively."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
631
1
"""Generic helpers shared across the library: framework detection, tensor
conversion utilities and the `ModelOutput` container."""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple

import numpy as np

from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy


if is_flax_available():
    import jax.numpy as jnp


class cached_property(property):
    """Descriptor behaving like @property but caching the value on first access."""

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached


def strtobool(val):
    """Convert a truthy/falsy string to 1/0 (stand-in for distutils.util.strtobool)."""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")


def is_tensor(x):
    """True if `x` is a torch/TF/JAX tensor, a torch.fx proxy or a numpy array."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True
    return isinstance(x, np.ndarray)


def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """Tests if `x` is a numpy array or not."""
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        # Accept dtype names like "float32" by resolving them on the torch module.
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    return False if not is_flax_available() else _is_jax(x)


def to_py_obj(obj):
    """Recursively convert tensors/arrays (possibly nested in dict/list/tuple) to
    plain Python objects (lists and scalars)."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Recursively convert tensors (possibly nested in dict/list/tuple) to numpy arrays."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj


class ModelOutput(OrderedDict):
    """Base container for model outputs: usable both as a dict (by key) and as a
    tuple (by index), skipping `None` values in the tuple view."""

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        # Will raise a KeyException if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self) -> Tuple[Any]:
        """Convert self to a tuple of all non-`None` values."""
        return tuple(self[k] for k in self.keys())


class ExplicitEnum(str, Enum):
    """Enum whose missing-value error lists the valid choices."""

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"


class ContextManagers:
    """Enter a whole list of context managers via a single `with` (ExitStack wrapper)."""

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)


def can_return_loss(model_class):
    """True when the model's forward signature has `return_loss` defaulting to True."""
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False


def find_labels(model_class):
    """List the label argument names expected by `model_class`'s forward signature."""
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]


def flatten_dict(d, parent_key="", delimiter="."):
    """Flatten a nested dict into a single-level dict, joining keys with `delimiter`."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))


@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir=False):
    """Yield a fresh temporary directory when requested, otherwise `working_dir`."""
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir


def transpose(array, axes=None):
    """Framework-agnostic `transpose` for numpy/torch/TF/JAX arrays."""
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """Framework-agnostic `reshape` for numpy/torch/TF/JAX arrays."""
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """Framework-agnostic `squeeze` for numpy/torch/TF/JAX arrays."""
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """Framework-agnostic `expand_dims` for numpy/torch/TF/JAX arrays."""
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """Framework-agnostic element count for numpy/torch/TF/JAX arrays."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def add_model_info_to_auto_map(auto_map, repo_id):
    """Prefix each auto_map entry with `repo_id--` unless it is already qualified."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"
    return auto_map


def infer_framework(model_class):
    """Infer "pt"/"tf"/"flax" by walking the class's MRO; raises TypeError otherwise."""
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
631
'''simple docstring''' import unittest import torch from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel from diffusers.training_utils import set_seed from diffusers.utils.testing_utils import slow _A : Union[str, Any] =False class _lowercase ( unittest.TestCase ): def lowerCamelCase_ ( self: Dict , UpperCamelCase__: str=32 ): set_seed(0 ) lowerCamelCase__ : Optional[int] = UNetaDModel(sample_size=UpperCamelCase__ , in_channels=3 , out_channels=3 ) lowerCamelCase__ : List[Any] = torch.optim.SGD(model.parameters() , lr=0.0_001 ) return model, optimizer @slow def lowerCamelCase_ ( self: List[str] ): lowerCamelCase__ : Optional[Any] = """cpu""" # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable lowerCamelCase__ : List[Any] = DDPMScheduler( num_train_timesteps=1_000 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=UpperCamelCase__ , ) lowerCamelCase__ : Any = DDIMScheduler( num_train_timesteps=1_000 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=UpperCamelCase__ , ) assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps # shared batches for DDPM and DDIM set_seed(0 ) lowerCamelCase__ : str = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(UpperCamelCase__ ) for _ in range(4 )] lowerCamelCase__ : Tuple = [torch.randn((4, 3, 32, 32) ).to(UpperCamelCase__ ) for _ in range(4 )] lowerCamelCase__ : Tuple = [torch.randint(0 , 1_000 , (4,) ).long().to(UpperCamelCase__ ) for _ in range(4 )] # train with a DDPM scheduler lowerCamelCase__ , lowerCamelCase__ : Any = self.get_model_optimizer(resolution=32 ) model.train().to(UpperCamelCase__ ) for i in range(4 ): optimizer.zero_grad() lowerCamelCase__ : str = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] ) lowerCamelCase__ : str = model(UpperCamelCase__ , timesteps[i] ).sample lowerCamelCase__ : Tuple = torch.nn.functional.mse_loss(UpperCamelCase__ , noise[i] ) 
loss.backward() optimizer.step() del model, optimizer # recreate the model and optimizer, and retry with DDIM lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.get_model_optimizer(resolution=32 ) model.train().to(UpperCamelCase__ ) for i in range(4 ): optimizer.zero_grad() lowerCamelCase__ : Optional[Any] = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] ) lowerCamelCase__ : Dict = model(UpperCamelCase__ , timesteps[i] ).sample lowerCamelCase__ : Union[str, Any] = torch.nn.functional.mse_loss(UpperCamelCase__ , noise[i] ) loss.backward() optimizer.step() del model, optimizer self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) ) self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) )
631
1
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class _lowercase ( unittest.TestCase ): def lowerCamelCase_ ( self: Tuple ): # For consistency across different places the DisjunctiveConstraint is called, # dc.token_ids is a list of integers. It is also initialized only by integers. lowerCamelCase__ : List[str] = [[1, 2, 4], [1, 2, 3, 4]] lowerCamelCase__ : List[str] = DisjunctiveConstraint(UpperCamelCase__ ) self.assertTrue(isinstance(dc.token_ids , UpperCamelCase__ ) ) with self.assertRaises(UpperCamelCase__ ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(UpperCamelCase__ ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def lowerCamelCase_ ( self: List[str] ): # We can't have constraints that are complete subsets of another. This leads to a preverse # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint? # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it). 
lowerCamelCase__ : Union[str, Any] = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(UpperCamelCase__ ): DisjunctiveConstraint(UpperCamelCase__ ) # fails here def lowerCamelCase_ ( self: Optional[Any] ): lowerCamelCase__ : List[Any] = [[1, 2, 3], [1, 2, 4]] lowerCamelCase__ : Union[str, Any] = DisjunctiveConstraint(UpperCamelCase__ ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = dc.update(1 ) lowerCamelCase__ : Optional[int] = stepped is True and completed is False and reset is False self.assertTrue(UpperCamelCase__ ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = dc.update(2 ) lowerCamelCase__ : Any = stepped is True and completed is False and reset is False self.assertTrue(UpperCamelCase__ ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] = dc.update(3 ) lowerCamelCase__ : List[str] = stepped is True and completed is True and reset is False self.assertTrue(UpperCamelCase__ ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def lowerCamelCase_ ( self: Optional[int] ): lowerCamelCase__ : List[str] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] lowerCamelCase__ : Optional[int] = DisjunctiveConstraint(UpperCamelCase__ ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! 
self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
631
'''simple docstring''' from statistics import mean import numpy as np def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> list: lowerCamelCase__ : Optional[int] = 0 # Number of processes finished lowerCamelCase__ : Union[str, Any] = 0 # Displays the finished process. # If it is 0, the performance is completed if it is 1, before the performance. lowerCamelCase__ : Tuple = [0] * no_of_process # List to include calculation results lowerCamelCase__ : List[str] = [0] * no_of_process # Sort by arrival time. lowerCamelCase__ : Union[str, Any] = [burst_time[i] for i in np.argsort(UpperCamelCase )] lowerCamelCase__ : List[Any] = [process_name[i] for i in np.argsort(UpperCamelCase )] arrival_time.sort() while no_of_process > finished_process_count: lowerCamelCase__ : str = 0 while finished_process[i] == 1: i += 1 if current_time < arrival_time[i]: lowerCamelCase__ : Union[str, Any] = arrival_time[i] lowerCamelCase__ : Any = 0 # Index showing the location of the process being performed lowerCamelCase__ : Union[str, Any] = 0 # Saves the current response ratio. lowerCamelCase__ : Any = 0 for i in range(0 , UpperCamelCase ): if finished_process[i] == 0 and arrival_time[i] <= current_time: lowerCamelCase__ : Optional[int] = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[ i ] if response_ratio < temp: lowerCamelCase__ : int = temp lowerCamelCase__ : str = i # Calculate the turn around time lowerCamelCase__ : Optional[int] = current_time + burst_time[loc] - arrival_time[loc] current_time += burst_time[loc] # Indicates that the process has been performed. 
lowerCamelCase__ : List[str] = 1 # Increase finished_process_count by 1 finished_process_count += 1 return turn_around_time def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> list: lowerCamelCase__ : int = [0] * no_of_process for i in range(0 , UpperCamelCase ): lowerCamelCase__ : Optional[Any] = turn_around_time[i] - burst_time[i] return waiting_time if __name__ == "__main__": _A : List[str] =5 _A : Optional[Any] =['''A''', '''B''', '''C''', '''D''', '''E'''] _A : Optional[int] =[1, 2, 3, 4, 5] _A : Dict =[1, 2, 3, 4, 5] _A : Any =calculate_turn_around_time( process_name, arrival_time, burst_time, no_of_process ) _A : Optional[int] =calculate_waiting_time( process_name, turn_around_time, burst_time, no_of_process ) print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''') for i in range(0, no_of_process): print( F'{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t' F'{turn_around_time[i]}\t\t\t{waiting_time[i]}' ) print(F'average waiting time : {mean(waiting_time):.5f}') print(F'average turn around time : {mean(turn_around_time):.5f}')
631
1
'''simple docstring''' import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html _A : List[Any] ='''platform''' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class _lowercase : a = PegasusConfig a = {} a = """gelu""" def __init__( self: Union[str, Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Union[str, Any]=13 , UpperCamelCase__: Optional[Any]=7 , UpperCamelCase__: Union[str, Any]=True , UpperCamelCase__: str=False , UpperCamelCase__: str=99 , UpperCamelCase__: List[Any]=32 , UpperCamelCase__: Union[str, Any]=5 , UpperCamelCase__: List[Any]=4 , UpperCamelCase__: Union[str, Any]=37 , UpperCamelCase__: Any=0.1 , UpperCamelCase__: Dict=0.1 , UpperCamelCase__: Optional[int]=20 , UpperCamelCase__: Optional[Any]=2 , UpperCamelCase__: Union[str, Any]=1 , UpperCamelCase__: Optional[Any]=0 , ): lowerCamelCase__ : str = parent lowerCamelCase__ : Any = batch_size lowerCamelCase__ : Tuple = seq_length lowerCamelCase__ : Dict = is_training lowerCamelCase__ : str = use_labels lowerCamelCase__ : Dict = vocab_size lowerCamelCase__ : Optional[Any] = hidden_size lowerCamelCase__ : List[Any] = num_hidden_layers lowerCamelCase__ : Union[str, Any] = num_attention_heads lowerCamelCase__ : Dict = intermediate_size lowerCamelCase__ : Any = hidden_dropout_prob lowerCamelCase__ : Tuple = attention_probs_dropout_prob lowerCamelCase__ : List[str] = 
max_position_embeddings lowerCamelCase__ : List[Any] = eos_token_id lowerCamelCase__ : List[Any] = pad_token_id lowerCamelCase__ : Optional[int] = bos_token_id def lowerCamelCase_ ( self: Optional[Any] ): lowerCamelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) lowerCamelCase__ : Tuple = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) lowerCamelCase__ : Optional[Any] = np.concatenate([input_ids, eos_tensor] , axis=1 ) lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ : Union[str, Any] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) lowerCamelCase__ : Optional[Any] = prepare_pegasus_inputs_dict(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return config, inputs_dict def lowerCamelCase_ ( self: Tuple , UpperCamelCase__: List[str] , UpperCamelCase__: Any , UpperCamelCase__: Optional[Any] ): lowerCamelCase__ : Any = 20 lowerCamelCase__ : Optional[int] = model_class_name(UpperCamelCase__ ) lowerCamelCase__ : Tuple = model.encode(inputs_dict["""input_ids"""] ) lowerCamelCase__ , lowerCamelCase__ : int = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) lowerCamelCase__ : Optional[int] = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : 
Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) lowerCamelCase__ : str = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowerCamelCase__ : str = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase__ , decoder_attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , decoder_position_ids=UpperCamelCase__ , ) lowerCamelCase__ : List[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) lowerCamelCase__ : List[Any] = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase__ , decoder_attention_mask=UpperCamelCase__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase__ , ) lowerCamelCase__ : Tuple = model.decode(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''' ) def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: Dict , UpperCamelCase__: Tuple , UpperCamelCase__: Dict ): lowerCamelCase__ : Union[str, Any] = 20 lowerCamelCase__ : int = model_class_name(UpperCamelCase__ ) lowerCamelCase__ : List[Any] = model.encode(inputs_dict["""input_ids"""] ) lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) lowerCamelCase__ : List[Any] = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) lowerCamelCase__ : List[Any] = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : int = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowerCamelCase__ : Dict = 
model.decode( decoder_input_ids[:, :-1] , UpperCamelCase__ , decoder_attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , decoder_position_ids=UpperCamelCase__ , ) lowerCamelCase__ : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) lowerCamelCase__ : Tuple = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase__ , decoder_position_ids=UpperCamelCase__ , ) lowerCamelCase__ : Tuple = model.decode(UpperCamelCase__ , UpperCamelCase__ , decoder_attention_mask=UpperCamelCase__ ) lowerCamelCase__ : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''' ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , ) -> str: if attention_mask is None: lowerCamelCase__ : Optional[int] = np.not_equal(UpperCamelCase , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: lowerCamelCase__ : Union[str, Any] = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class _lowercase ( _lowercase , unittest.TestCase ): a = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) a = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () a = True a = False a = False a = False def lowerCamelCase_ ( self: List[str] ): lowerCamelCase__ : Tuple = FlaxPegasusModelTester(self ) lowerCamelCase__ : Tuple = ConfigTester(self , config_class=UpperCamelCase__ ) def lowerCamelCase_ ( self: List[str] ): 
self.config_tester.run_common_tests() def lowerCamelCase_ ( self: List[str] ): lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def lowerCamelCase_ ( self: Optional[int] ): lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def lowerCamelCase_ ( self: Any ): lowerCamelCase__ , lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCamelCase__ : List[Any] = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : str = model_class(UpperCamelCase__ ) @jax.jit def encode_jitted(UpperCamelCase__: Optional[int] , UpperCamelCase__: str=None , **UpperCamelCase__: int ): return model.encode(input_ids=UpperCamelCase__ , attention_mask=UpperCamelCase__ ) with self.subTest("""JIT Enabled""" ): lowerCamelCase__ : Tuple = encode_jitted(**UpperCamelCase__ ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): lowerCamelCase__ : Tuple = encode_jitted(**UpperCamelCase__ ).to_tuple() self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) ) for jitted_output, output in zip(UpperCamelCase__ , UpperCamelCase__ ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCamelCase__ : List[str] = model_class(UpperCamelCase__ ) lowerCamelCase__ : Tuple = 
model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) lowerCamelCase__ : Optional[int] = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(UpperCamelCase__: List[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any] ): return model.decode( decoder_input_ids=UpperCamelCase__ , decoder_attention_mask=UpperCamelCase__ , encoder_outputs=UpperCamelCase__ , ) with self.subTest("""JIT Enabled""" ): lowerCamelCase__ : Dict = decode_jitted(**UpperCamelCase__ ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): lowerCamelCase__ : Optional[Any] = decode_jitted(**UpperCamelCase__ ).to_tuple() self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) ) for jitted_output, output in zip(UpperCamelCase__ , UpperCamelCase__ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCamelCase_ ( self: Optional[Any] ): for model_class_name in self.all_model_classes: lowerCamelCase__ : List[Any] = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=UpperCamelCase__ ) lowerCamelCase__ : int = np.ones((1, 1) ) lowerCamelCase__ : int = model(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) @slow def lowerCamelCase_ ( self: Dict ): lowerCamelCase__ : Union[str, Any] = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" ) lowerCamelCase__ : Dict = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" ) lowerCamelCase__ : Tuple = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. 
Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! 
Everywhere we go we smash it up!\" """, ] lowerCamelCase__ : Any = [ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] lowerCamelCase__ : Dict = tokenizer(UpperCamelCase__ , return_tensors="""np""" , truncation=UpperCamelCase__ , max_length=512 , padding=UpperCamelCase__ ) lowerCamelCase__ : str = model.generate(**UpperCamelCase__ , num_beams=2 ).sequences lowerCamelCase__ : Dict = tokenizer.batch_decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ ) assert tgt_text == decoded
631
'''simple docstring''' from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
631
1
'''simple docstring''' import argparse import os import re import packaging.version _A : List[str] ='''examples/''' _A : Any ={ '''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''), '''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''), '''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''), '''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''), } _A : int ={ '''init''': '''src/transformers/__init__.py''', '''setup''': '''setup.py''', } _A : int ='''README.md''' def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str: with open(UpperCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ : List[str] = f.read() lowerCamelCase__ , lowerCamelCase__ : Optional[int] = REPLACE_PATTERNS[pattern] lowerCamelCase__ : Dict = replace.replace("""VERSION""" , UpperCamelCase ) lowerCamelCase__ : str = re_pattern.sub(UpperCamelCase , UpperCamelCase ) with open(UpperCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(UpperCamelCase ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> str: for folder, directories, fnames in os.walk(UpperCamelCase ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("""research_projects""" ) if "legacy" in directories: directories.remove("""legacy""" ) for fname in fnames: if fname.endswith(""".py""" ): update_version_in_file(os.path.join(UpperCamelCase , UpperCamelCase ) , UpperCamelCase , pattern="""examples""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase=False ) -> List[Any]: for pattern, fname in REPLACE_FILES.items(): update_version_in_file(UpperCamelCase , UpperCamelCase , UpperCamelCase ) if not patch: 
update_version_in_examples(UpperCamelCase ) def SCREAMING_SNAKE_CASE_ () -> Optional[Any]: lowerCamelCase__ : Dict = """🤗 Transformers currently provides the following architectures""" lowerCamelCase__ : Dict = """1. Want to contribute a new model?""" with open(UpperCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ : int = f.readlines() # Find the start of the list. lowerCamelCase__ : Optional[int] = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 lowerCamelCase__ : Optional[Any] = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith("""1.""" ): lowerCamelCase__ : List[Any] = lines[index].replace( """https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , ) index += 1 with open(UpperCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(UpperCamelCase ) def SCREAMING_SNAKE_CASE_ () -> Optional[Any]: with open(REPLACE_FILES["""init"""] , """r""" ) as f: lowerCamelCase__ : int = f.read() lowerCamelCase__ : Optional[Any] = REPLACE_PATTERNS["""init"""][0].search(UpperCamelCase ).groups()[0] return packaging.version.parse(UpperCamelCase ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase=False ) -> List[Any]: lowerCamelCase__ : Union[str, Any] = get_version() if patch and default_version.is_devrelease: raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" ) if default_version.is_devrelease: lowerCamelCase__ : List[str] = default_version.base_version elif patch: lowerCamelCase__ : Any = f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}''' else: lowerCamelCase__ : List[Any] = f'''{default_version.major}.{default_version.minor + 1}.0''' # Now let's ask nicely if that's the right one. lowerCamelCase__ : Any = input(f'''Which version are you releasing? 
[{default_version}]''' ) if len(UpperCamelCase ) == 0: lowerCamelCase__ : Optional[int] = default_version print(f'''Updating version to {version}.''' ) global_version_update(UpperCamelCase , patch=UpperCamelCase ) if not patch: print("""Cleaning main README, don't forget to run `make fix-copies`.""" ) clean_main_ref_in_model_list() def SCREAMING_SNAKE_CASE_ () -> List[str]: lowerCamelCase__ : Optional[int] = get_version() lowerCamelCase__ : Any = f'''{current_version.major}.{current_version.minor + 1}.0.dev0''' lowerCamelCase__ : Any = current_version.base_version # Check with the user we got that right. lowerCamelCase__ : List[Any] = input(f'''Which version are we developing now? [{dev_version}]''' ) if len(UpperCamelCase ) == 0: lowerCamelCase__ : Dict = dev_version print(f'''Updating version to {version}.''' ) global_version_update(UpperCamelCase ) print("""Cleaning main README, don't forget to run `make fix-copies`.""" ) clean_main_ref_in_model_list() if __name__ == "__main__": _A : List[Any] =argparse.ArgumentParser() parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''') parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''') _A : List[str] =parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('''Nothing to do after a patch :-)''') else: post_release_work()
631
'''simple docstring''' import inspect import math import tempfile import unittest import numpy as np from transformers import ViTMAEConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMAEForPreTraining, ViTMAEModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _lowercase : def __init__( self: List[str] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[str]=13 , UpperCamelCase__: Optional[int]=30 , UpperCamelCase__: Optional[int]=2 , UpperCamelCase__: List[str]=3 , UpperCamelCase__: List[str]=True , UpperCamelCase__: Any=True , UpperCamelCase__: List[Any]=32 , UpperCamelCase__: Any=5 , UpperCamelCase__: Optional[Any]=4 , UpperCamelCase__: Dict=37 , UpperCamelCase__: List[Any]="gelu" , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: List[Any]=10 , UpperCamelCase__: Tuple=0.02 , UpperCamelCase__: Optional[int]=3 , UpperCamelCase__: Dict=0.6 , UpperCamelCase__: int=None , ): lowerCamelCase__ : Dict = parent lowerCamelCase__ : Optional[Any] = batch_size lowerCamelCase__ : Optional[int] = image_size lowerCamelCase__ : Optional[Any] = patch_size lowerCamelCase__ : Any = num_channels lowerCamelCase__ : Any = is_training lowerCamelCase__ : Union[str, Any] = use_labels lowerCamelCase__ : List[str] = hidden_size lowerCamelCase__ : List[str] = num_hidden_layers lowerCamelCase__ : List[Any] = num_attention_heads lowerCamelCase__ : str = intermediate_size lowerCamelCase__ : str = 
hidden_act lowerCamelCase__ : Any = hidden_dropout_prob lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob lowerCamelCase__ : List[Any] = type_sequence_label_size lowerCamelCase__ : int = initializer_range lowerCamelCase__ : List[str] = mask_ratio lowerCamelCase__ : List[Any] = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) lowerCamelCase__ : str = (image_size // patch_size) ** 2 lowerCamelCase__ : Optional[int] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def lowerCamelCase_ ( self: Optional[int] ): lowerCamelCase__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ : Optional[int] = None if self.use_labels: lowerCamelCase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : Any = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self: str ): return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[int] , UpperCamelCase__: int ): lowerCamelCase__ : Tuple = ViTMAEModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : List[Any] = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase_ ( self: str , UpperCamelCase__: Union[str, 
Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Dict ): lowerCamelCase__ : int = ViTMAEForPreTraining(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ ) lowerCamelCase__ : Any = (self.image_size // self.patch_size) ** 2 lowerCamelCase__ : str = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images lowerCamelCase__ : Dict = 1 lowerCamelCase__ : Optional[int] = ViTMAEForPreTraining(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ ) lowerCamelCase__ : Tuple = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : Optional[int] = self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any = config_and_inputs lowerCamelCase__ : Optional[int] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _lowercase ( _lowercase , _lowercase , unittest.TestCase ): a = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else () a = {"""feature-extraction""": ViTMAEModel} if is_torch_available() else {} a = False a = False a = False a = False def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : Tuple = ViTMAEModelTester(self ) lowerCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 ) def lowerCamelCase_ ( self: Union[str, Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" ) def lowerCamelCase_ ( self: Dict ): pass def lowerCamelCase_ ( self: List[str] ): lowerCamelCase__ , lowerCamelCase__ : str = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : str = model_class(UpperCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCamelCase__ : Optional[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) ) def lowerCamelCase_ ( self: List[Any] ): lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Any = model_class(UpperCamelCase__ ) lowerCamelCase__ : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ : Any = [*signature.parameters.keys()] lowerCamelCase__ : List[str] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Dict , UpperCamelCase__: Optional[int] ): # make masks reproducible np.random.seed(2 ) lowerCamelCase__ : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 ) lowerCamelCase__ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowerCamelCase__ : Tuple = torch.from_numpy(UpperCamelCase__ ) # Add `noise` argument. 
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument lowerCamelCase__ : Tuple = pt_noise super().check_pt_tf_models(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def lowerCamelCase_ ( self: str ): lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): lowerCamelCase__ : Optional[int] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) lowerCamelCase__ : Optional[int] = outputs[0].cpu().numpy() lowerCamelCase__ : List[str] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase__ ) lowerCamelCase__ : List[str] = model_class.from_pretrained(UpperCamelCase__ ) model.to(UpperCamelCase__ ) # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): lowerCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) # Make sure we don't have nans lowerCamelCase__ : Dict = after_outputs[0].cpu().numpy() lowerCamelCase__ : Tuple = 0 lowerCamelCase__ : Union[str, Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(UpperCamelCase__ , 1e-5 ) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def lowerCamelCase_ ( self: int ): pass @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def lowerCamelCase_ ( self: Any ): pass @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. 
See test_save_load to get deterministic results.""" ) def lowerCamelCase_ ( self: List[str] ): pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" ) def lowerCamelCase_ ( self: Tuple ): pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowerCamelCase_ ( self: Optional[int] ): pass @slow def lowerCamelCase_ ( self: List[str] ): for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : Tuple = ViTMAEModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE_ () -> List[Any]: lowerCamelCase__ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class _lowercase ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self: List[str] ): return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None @slow def lowerCamelCase_ ( self: Tuple ): # make random mask reproducible across the PT and TF model np.random.seed(2 ) lowerCamelCase__ : str = ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(UpperCamelCase__ ) lowerCamelCase__ : Tuple = self.default_image_processor lowerCamelCase__ : List[str] = prepare_img() lowerCamelCase__ : int = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) lowerCamelCase__ : List[str] = ViTMAEConfig() lowerCamelCase__ : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) lowerCamelCase__ : Any = np.random.uniform(size=(1, num_patches) ) # forward pass with torch.no_grad(): lowerCamelCase__ : List[Any] = model(**UpperCamelCase__ , noise=torch.from_numpy(UpperCamelCase__ ).to(device=UpperCamelCase__ ) 
) # verify the logits lowerCamelCase__ : List[str] = torch.Size((1, 196, 768) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) lowerCamelCase__ : str = torch.tensor( [[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(UpperCamelCase__ ) , atol=1e-4 ) )
631
1
"""Configuration for Masked BERT: a BERT variant whose linear layers carry learned pruning masks."""
import logging

from transformers.configuration_utils import PretrainedConfig

logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """BERT-style configuration with three extra pruning hyper-parameters.

    Extra arguments compared to a vanilla BERT config:
        pruning_method: strategy used to turn mask scores into binary masks (e.g. "topK").
        mask_init: initialization scheme for the mask scores.
        mask_scale: scale applied when initializing the mask scores.
    """

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # NOTE(review): the previous version bound every argument to a single
        # throwaway local, so the config ended up with no attributes at all
        # (and the duplicated parameter names were a SyntaxError). Restored
        # the standard PretrainedConfig attribute assignments.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
631
"""fsspec filesystems that expose a single compressed file as a one-file archive."""
import os
from typing import Optional

import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE


class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read the contents of a compressed file as a filesystem containing one file inside.

    Subclasses set ``protocol`` / ``compression`` / ``extension`` for a concrete codec.
    """

    root_marker = ""
    protocol: str = None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        """
        Args:
            fo: path to the compressed file.
            target_protocol: protocol of the underlying filesystem holding ``fo``.
            target_options: options forwarded to that underlying filesystem.
        """
        super().__init__(self, **kwargs)
        # NOTE(review): the previous version assigned all of these to throwaway
        # locals, so self.file / self.compressed_name / self.uncompressed_name /
        # self.dir_cache were never set; restored the attribute assignments.
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        """Populate the single-entry directory cache on first use."""
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        """Return the full decompressed contents as bytes."""
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            # Delegates everything to the wrapped reader, but returns a plain
            # Python object from __enter__ so fsspec can patch .close on it.
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
631
1
"""Configuration for the Audio Spectrogram Transformer (AST) model."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    """ViT-style configuration for AST; inputs are mel-spectrogram patches.

    AST-specific arguments:
        frequency_stride / time_stride: patch strides over the spectrogram axes.
        max_length: maximum number of time frames of the input spectrogram.
        num_mel_bins: number of frequency bins of the input spectrogram.
    """

    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1_024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # NOTE(review): the previous version bound every argument to a single
        # throwaway local (and duplicated parameter names, a SyntaxError), so
        # the config carried no attributes; restored standard assignments.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
631
"""Convert MobileViTV2 checkpoints from the original repository to the Hugging Face format."""
import argparse
import collections
import json
from pathlib import Path

import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileViTImageProcessor,
    MobileViTVaConfig,
    MobileViTVaForImageClassification,
    MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_orig_config_file(config_path):
    """Load the original YAML config as a flat argparse.Namespace (nested keys joined with '.')."""
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        # Recursively flatten nested mappings: {"a": {"b": 1}} -> {"a.b": 1}
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(config_path, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)
            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(config_path, str(exc)))
    return config


def get_mobilevitva_config(task_name, orig_cfg_file):
    """Build a MobileViTVaConfig for ``task_name`` from the original config file."""
    config = MobileViTVaConfig()

    is_segmentation_model = False
    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config,  'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]``."""
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict, base_model=False):
    """Map original MobileViTV2 state-dict keys to their Hugging Face equivalents."""
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
                if f"layer_{i}.1.global_rep.{j+1}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                    )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")
        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys


def remove_unused_keys(state_dict):
    """Remove keys the HF model does not use (e.g. the auxiliary segmentation head)."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)


def prepare_img():
    """Download the standard COCO cats image used for verification."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original checkpoint's weights to our MobileViTV2 structure and
    save the converted model + image processor to ``pytorch_dump_folder_path``.
    """
    config = get_mobilevitva_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task",
        default="imagenet1k_256",
        type=str,
        help=(
            "Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
            """
        Classification (ImageNet-1k)
            - MobileViTV2 (256x256) : imagenet1k_256
            - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
            - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
              imagenet21k_to_1k_256
            - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
              ImageNet-1k 384x384) : imagenet21k_to_1k_384
        Segmentation
            - ADE20K Dataset : ade20k_deeplabv3
            - Pascal VOC 2012 Dataset: voc_deeplabv3
    """
        ),
        choices=[
            "imagenet1k_256",
            "imagenet1k_384",
            "imagenet21k_to_1k_256",
            "imagenet21k_to_1k_384",
            "ade20k_deeplabv3",
            "voc_deeplabv3",
        ],
    )
    parser.add_argument(
        "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_mobilevitva_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
631
1
'''simple docstring'''

# Quine-style one-liner: the lambda substitutes the template string into
# itself via %r (note the escaped %% so only %r is filled in), so the printed
# output is a self-reproducing print statement.
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
631
'''simple docstring''' import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class _lowercase ( _lowercase ): def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : Dict = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(UpperCamelCase__ , """width_multiplier""" ) ) class _lowercase : def __init__( self: str , UpperCamelCase__: Optional[int] , UpperCamelCase__: str=13 , UpperCamelCase__: Any=64 , UpperCamelCase__: Optional[Any]=2 , UpperCamelCase__: str=3 , UpperCamelCase__: List[str]="swish" , UpperCamelCase__: Any=3 , UpperCamelCase__: Optional[int]=32 , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: int=0.02 , UpperCamelCase__: Dict=True , UpperCamelCase__: Dict=True , UpperCamelCase__: Any=10 , UpperCamelCase__: int=None , UpperCamelCase__: List[Any]=0.25 , UpperCamelCase__: str=0.0 , UpperCamelCase__: Optional[int]=0.0 , ): lowerCamelCase__ : Any = parent lowerCamelCase__ : Optional[Any] = batch_size lowerCamelCase__ : Optional[int] = image_size lowerCamelCase__ : str = patch_size lowerCamelCase__ : Optional[int] = num_channels lowerCamelCase__ : Optional[Any] = make_divisible(512 * width_multiplier , divisor=8 ) lowerCamelCase__ : List[str] = hidden_act 
lowerCamelCase__ : Any = conv_kernel_size lowerCamelCase__ : Any = output_stride lowerCamelCase__ : Union[str, Any] = classifier_dropout_prob lowerCamelCase__ : List[str] = use_labels lowerCamelCase__ : Optional[Any] = is_training lowerCamelCase__ : List[str] = num_labels lowerCamelCase__ : Dict = initializer_range lowerCamelCase__ : List[Any] = scope lowerCamelCase__ : Tuple = width_multiplier lowerCamelCase__ : List[Any] = ffn_dropout lowerCamelCase__ : Any = attn_dropout def lowerCamelCase_ ( self: Dict ): lowerCamelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ : Tuple = None lowerCamelCase__ : Optional[Any] = None if self.use_labels: lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_labels ) lowerCamelCase__ : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) lowerCamelCase__ : Union[str, Any] = self.get_config() return config, pixel_values, labels, pixel_labels def lowerCamelCase_ ( self: List[Any] ): return MobileViTVaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , ) def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: int , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] ): lowerCamelCase__ : Union[str, Any] = MobileViTVaModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : str = model(UpperCamelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // 
self.output_stride, ) , ) def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Tuple ): lowerCamelCase__ : Tuple = self.num_labels lowerCamelCase__ : Dict = MobileViTVaForImageClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : int = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Any , UpperCamelCase__: Optional[Any] , UpperCamelCase__: str ): lowerCamelCase__ : List[str] = self.num_labels lowerCamelCase__ : Union[str, Any] = MobileViTVaForSemanticSegmentation(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : Tuple = model(UpperCamelCase__ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ : Any = self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = config_and_inputs lowerCamelCase__ : Optional[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _lowercase ( _lowercase , _lowercase , unittest.TestCase ): a = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) a = ( { """feature-extraction""": MobileViTVaModel, """image-classification""": MobileViTVaForImageClassification, """image-segmentation""": 
MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) a = False a = False a = False a = False def lowerCamelCase_ ( self: Optional[int] ): lowerCamelCase__ : Tuple = MobileViTVaModelTester(self ) lowerCamelCase__ : List[str] = MobileViTVaConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ ) def lowerCamelCase_ ( self: Tuple ): self.config_tester.run_common_tests() @unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" ) def lowerCamelCase_ ( self: int ): pass @unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" ) def lowerCamelCase_ ( self: List[str] ): pass @unittest.skip(reason="""MobileViTV2 does not output attentions""" ) def lowerCamelCase_ ( self: Union[str, Any] ): pass @require_torch_multi_gpu @unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" ) def lowerCamelCase_ ( self: int ): pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowerCamelCase_ ( self: Tuple ): pass def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ ) lowerCamelCase__ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ : Tuple = [*signature.parameters.keys()] lowerCamelCase__ : str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def lowerCamelCase_ ( self: str ): lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def lowerCamelCase_ ( self: List[str] ): def check_hidden_states_output(UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any] ): lowerCamelCase__ : 
List[Any] = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): lowerCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) lowerCamelCase__ : Optional[int] = outputs.hidden_states lowerCamelCase__ : List[Any] = 5 self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. lowerCamelCase__ : int = 2 for i in range(len(UpperCamelCase__ ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) lowerCamelCase__ , lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : int = True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase__ : str = True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Dict ): lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase__ ) @slow def lowerCamelCase_ ( self: Union[str, Any] ): for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : Union[str, Any] = MobileViTVaModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE_ () -> Optional[int]: lowerCamelCase__ : str = 
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class _lowercase ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self: Tuple ): return ( MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ) if is_vision_available() else None ) @slow def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : Optional[Any] = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to( UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = self.default_image_processor lowerCamelCase__ : List[Any] = prepare_img() lowerCamelCase__ : Any = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): lowerCamelCase__ : int = model(**UpperCamelCase__ ) # verify the logits lowerCamelCase__ : str = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) lowerCamelCase__ : Optional[int] = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) ) @slow def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : int = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) lowerCamelCase__ : Optional[Any] = model.to(UpperCamelCase__ ) lowerCamelCase__ : Any = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) lowerCamelCase__ : Union[str, Any] = prepare_img() lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): lowerCamelCase__ : Optional[Any] = model(**UpperCamelCase__ ) lowerCamelCase__ : str = outputs.logits # verify the logits lowerCamelCase__ : List[str] = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , UpperCamelCase__ ) lowerCamelCase__ : Any = 
torch.tensor( [ [[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]], [[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]], [[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]], ] , device=UpperCamelCase__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1e-4 ) ) @slow def lowerCamelCase_ ( self: Optional[int] ): lowerCamelCase__ : Optional[Any] = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) lowerCamelCase__ : List[Any] = model.to(UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) lowerCamelCase__ : Optional[Any] = prepare_img() lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): lowerCamelCase__ : Dict = model(**UpperCamelCase__ ) lowerCamelCase__ : List[str] = outputs.logits.detach().cpu() lowerCamelCase__ : List[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(50, 60)] ) lowerCamelCase__ : int = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , UpperCamelCase__ ) lowerCamelCase__ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ ) lowerCamelCase__ : int = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
631
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _A : Optional[Any] =logging.get_logger(__name__) _A : Optional[int] ={ '''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''', '''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''', '''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''', '''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''', '''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''', '''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''', '''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''', '''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''', '''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''', '''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''', } class _lowercase ( _lowercase ): a = """rwkv""" a = {"""max_position_embeddings""": """context_length"""} def __init__( self: Tuple , UpperCamelCase__: Optional[Any]=50_277 , UpperCamelCase__: Union[str, Any]=1_024 , UpperCamelCase__: Tuple=4_096 , UpperCamelCase__: List[Any]=32 , UpperCamelCase__: Dict=None , UpperCamelCase__: Dict=None , UpperCamelCase__: int=1e-5 , UpperCamelCase__: Any=0 , UpperCamelCase__: str=0 , UpperCamelCase__: Union[str, Any]=6 , UpperCamelCase__: Optional[int]=False , UpperCamelCase__: Dict=True , **UpperCamelCase__: Dict , ): lowerCamelCase__ : Dict = vocab_size lowerCamelCase__ : Optional[Any] = context_length lowerCamelCase__ : Optional[Any] = hidden_size lowerCamelCase__ : Any = num_hidden_layers lowerCamelCase__ : int = attention_hidden_size if 
attention_hidden_size is not None else hidden_size lowerCamelCase__ : Union[str, Any] = intermediate_size if intermediate_size is not None else 4 * hidden_size lowerCamelCase__ : List[str] = layer_norm_epsilon lowerCamelCase__ : int = rescale_every lowerCamelCase__ : Optional[int] = use_cache lowerCamelCase__ : Dict = bos_token_id lowerCamelCase__ : Any = eos_token_id super().__init__( tie_word_embeddings=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
631
'''simple docstring''' import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _A : Optional[Any] =logging.get_logger(__name__) _A : Dict ={'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} _A : Tuple ={ '''tokenizer_file''': { '''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''', }, } _A : List[Any] ={ '''gpt-neox-20b''': 2_048, } class _lowercase ( _lowercase ): a = VOCAB_FILES_NAMES a = PRETRAINED_VOCAB_FILES_MAP a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a = ["""input_ids""", """attention_mask"""] def __init__( self: Optional[int] , UpperCamelCase__: Union[str, Any]=None , UpperCamelCase__: int=None , UpperCamelCase__: Tuple=None , UpperCamelCase__: Any="<|endoftext|>" , UpperCamelCase__: Any="<|endoftext|>" , UpperCamelCase__: Union[str, Any]="<|endoftext|>" , UpperCamelCase__: Tuple=False , **UpperCamelCase__: str , ): super().__init__( UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , unk_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , ) lowerCamelCase__ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , UpperCamelCase__ ) != add_prefix_space: lowerCamelCase__ : Any = getattr(UpperCamelCase__ , pre_tok_state.pop("""type""" ) ) lowerCamelCase__ : Dict = add_prefix_space lowerCamelCase__ : Optional[int] = pre_tok_class(**UpperCamelCase__ ) lowerCamelCase__ : Dict = add_prefix_space def lowerCamelCase_ ( self: int , UpperCamelCase__: str , UpperCamelCase__: Optional[str] = None ): lowerCamelCase__ : Optional[Any] = 
self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ ) return tuple(UpperCamelCase__ ) def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: "Conversation" ): lowerCamelCase__ : str = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) + [self.eos_token_id] ) if len(UpperCamelCase__ ) > self.model_max_length: lowerCamelCase__ : int = input_ids[-self.model_max_length :] return input_ids
631
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _A : Optional[int] ={ '''configuration_altclip''': [ '''ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''AltCLIPConfig''', '''AltCLIPTextConfig''', '''AltCLIPVisionConfig''', ], '''processing_altclip''': ['''AltCLIPProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : Any =[ '''ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''AltCLIPPreTrainedModel''', '''AltCLIPModel''', '''AltCLIPTextModel''', '''AltCLIPVisionModel''', ] if TYPE_CHECKING: from .configuration_altclip import ( ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig, ) from .processing_altclip import AltCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_altclip import ( ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, AltCLIPModel, AltCLIPPreTrainedModel, AltCLIPTextModel, AltCLIPVisionModel, ) else: import sys _A : Tuple =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
631
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _A : Dict ={ '''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''], '''processing_git''': ['''GitProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : Union[str, Any] =[ '''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GitForCausalLM''', '''GitModel''', '''GitPreTrainedModel''', '''GitVisionModel''', ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys _A : Dict =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
631
1
'''simple docstring''' import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _A : Optional[Any] =logging.get_logger(__name__) _A : Dict ={'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} _A : Tuple ={ '''tokenizer_file''': { '''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''', }, } _A : List[Any] ={ '''gpt-neox-20b''': 2_048, } class _lowercase ( _lowercase ): a = VOCAB_FILES_NAMES a = PRETRAINED_VOCAB_FILES_MAP a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a = ["""input_ids""", """attention_mask"""] def __init__( self: Optional[int] , UpperCamelCase__: Union[str, Any]=None , UpperCamelCase__: int=None , UpperCamelCase__: Tuple=None , UpperCamelCase__: Any="<|endoftext|>" , UpperCamelCase__: Any="<|endoftext|>" , UpperCamelCase__: Union[str, Any]="<|endoftext|>" , UpperCamelCase__: Tuple=False , **UpperCamelCase__: str , ): super().__init__( UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , unk_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , ) lowerCamelCase__ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , UpperCamelCase__ ) != add_prefix_space: lowerCamelCase__ : Any = getattr(UpperCamelCase__ , pre_tok_state.pop("""type""" ) ) lowerCamelCase__ : Dict = add_prefix_space lowerCamelCase__ : Optional[int] = pre_tok_class(**UpperCamelCase__ ) lowerCamelCase__ : Dict = add_prefix_space def lowerCamelCase_ ( self: int , UpperCamelCase__: str , UpperCamelCase__: Optional[str] = None ): lowerCamelCase__ : Optional[Any] = 
self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ ) return tuple(UpperCamelCase__ ) def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: "Conversation" ): lowerCamelCase__ : str = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) + [self.eos_token_id] ) if len(UpperCamelCase__ ) > self.model_max_length: lowerCamelCase__ : int = input_ids[-self.model_max_length :] return input_ids
631
'''simple docstring''' import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin _A : int =get_tests_dir('''fixtures/test_sentencepiece.model''') _A : Tuple ={'''target_lang''': '''fi''', '''source_lang''': '''en'''} _A : int ='''>>zh<<''' _A : Dict ='''Helsinki-NLP/''' if is_torch_available(): _A : List[Any] ='''pt''' elif is_tf_available(): _A : Optional[int] ='''tf''' else: _A : Dict ='''jax''' @require_sentencepiece class _lowercase ( _lowercase , unittest.TestCase ): a = MarianTokenizer a = False a = True def lowerCamelCase_ ( self: List[str] ): super().setUp() lowerCamelCase__ : List[Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""] lowerCamelCase__ : Optional[Any] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) ) lowerCamelCase__ : Optional[int] = Path(self.tmpdirname ) save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] ) save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] ) copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] ) lowerCamelCase__ : Dict = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase_ ( self: Optional[Any] , **UpperCamelCase__: Any ): return MarianTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def lowerCamelCase_ ( self: str , 
UpperCamelCase__: List[str] ): return ( "This is a test", "This is a test", ) def lowerCamelCase_ ( self: Optional[Any] ): lowerCamelCase__ : Any = """</s>""" lowerCamelCase__ : List[Any] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ ) def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : int = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """</s>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """<pad>""" ) self.assertEqual(len(UpperCamelCase__ ) , 9 ) def lowerCamelCase_ ( self: int ): self.assertEqual(self.get_tokenizer().vocab_size , 9 ) def lowerCamelCase_ ( self: int ): lowerCamelCase__ : List[Any] = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' ) lowerCamelCase__ : Optional[int] = en_de_tokenizer(["""I am a small frog"""] , return_tensors=UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : Optional[Any] = [38, 121, 14, 697, 38_848, 0] self.assertListEqual(UpperCamelCase__ , batch.input_ids[0] ) lowerCamelCase__ : List[str] = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(UpperCamelCase__ ) lowerCamelCase__ : Tuple = [x.name for x in Path(UpperCamelCase__ ).glob("""*""" )] self.assertIn("""source.spm""" , UpperCamelCase__ ) MarianTokenizer.from_pretrained(UpperCamelCase__ ) def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ : List[Any] = self.get_tokenizer() lowerCamelCase__ : Any = tok( ["""I am a small frog""" * 1_000, """I am a small frog"""] , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(batch.input_ids.shape , (2, 512) ) def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : str = self.get_tokenizer() lowerCamelCase__ : 
Dict = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(batch_smaller.input_ids.shape , (2, 10) ) @slow def lowerCamelCase_ ( self: List[str] ): # fmt: off lowerCamelCase__ : int = {"""input_ids""": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 
58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , ) def lowerCamelCase_ ( self: List[str] ): lowerCamelCase__ : Union[str, Any] = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" ) lowerCamelCase__ : str = """Tämä on testi""" lowerCamelCase__ : Any = """This is a test""" lowerCamelCase__ : int = [76, 7, 2_047, 2] lowerCamelCase__ : List[str] = [69, 12, 11, 940, 2] 
lowerCamelCase__ : Tuple = tokenizer(UpperCamelCase__ ).input_ids self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = tokenizer(text_target=UpperCamelCase__ ).input_ids self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : Tuple = tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
631
1
'''simple docstring''' def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> List[Any]: lowerCamelCase__ : Optional[Any] = 1 lowerCamelCase__ : Union[str, Any] = 2 while i * i <= n: lowerCamelCase__ : Optional[Any] = 0 while n % i == 0: n //= i multiplicity += 1 n_divisors *= multiplicity + 1 i += 1 if n > 1: n_divisors *= 2 return n_divisors def SCREAMING_SNAKE_CASE_ () -> Optional[int]: lowerCamelCase__ : List[str] = 1 lowerCamelCase__ : str = 1 while True: i += 1 t_num += i if count_divisors(UpperCamelCase ) > 500: break return t_num if __name__ == "__main__": print(solution())
631
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _A : Optional[Any] =logging.get_logger(__name__) _A : Optional[int] ={ '''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''', '''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''', '''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''', '''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''', '''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''', '''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''', '''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''', '''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''', '''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''', '''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''', } class _lowercase ( _lowercase ): a = """rwkv""" a = {"""max_position_embeddings""": """context_length"""} def __init__( self: Tuple , UpperCamelCase__: Optional[Any]=50_277 , UpperCamelCase__: Union[str, Any]=1_024 , UpperCamelCase__: Tuple=4_096 , UpperCamelCase__: List[Any]=32 , UpperCamelCase__: Dict=None , UpperCamelCase__: Dict=None , UpperCamelCase__: int=1e-5 , UpperCamelCase__: Any=0 , UpperCamelCase__: str=0 , UpperCamelCase__: Union[str, Any]=6 , UpperCamelCase__: Optional[int]=False , UpperCamelCase__: Dict=True , **UpperCamelCase__: Dict , ): lowerCamelCase__ : Dict = vocab_size lowerCamelCase__ : Optional[Any] = context_length lowerCamelCase__ : Optional[Any] = hidden_size lowerCamelCase__ : Any = num_hidden_layers lowerCamelCase__ : int = attention_hidden_size if 
attention_hidden_size is not None else hidden_size lowerCamelCase__ : Union[str, Any] = intermediate_size if intermediate_size is not None else 4 * hidden_size lowerCamelCase__ : List[str] = layer_norm_epsilon lowerCamelCase__ : int = rescale_every lowerCamelCase__ : Optional[int] = use_cache lowerCamelCase__ : Dict = bos_token_id lowerCamelCase__ : Any = eos_token_id super().__init__( tie_word_embeddings=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
631
1
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class _lowercase : def __init__( self: int , UpperCamelCase__: Dict , UpperCamelCase__: List[str]=13 , UpperCamelCase__: Union[str, Any]=7 , UpperCamelCase__: Union[str, Any]=True , UpperCamelCase__: List[Any]=True , UpperCamelCase__: Optional[Any]=True , UpperCamelCase__: int=True , UpperCamelCase__: List[Any]=99 , UpperCamelCase__: Tuple=32 , UpperCamelCase__: List[str]=2 , UpperCamelCase__: Optional[Any]=4 , UpperCamelCase__: Optional[int]=37 , UpperCamelCase__: Any="gelu" , UpperCamelCase__: Any=0.1 , UpperCamelCase__: int=0.1 , UpperCamelCase__: Optional[Any]=512 , UpperCamelCase__: List[str]=16 , UpperCamelCase__: Optional[int]=2 , UpperCamelCase__: Dict=0.02 , UpperCamelCase__: Tuple=3 , UpperCamelCase__: Optional[int]=4 , UpperCamelCase__: Union[str, Any]=None , ): lowerCamelCase__ : Dict = parent lowerCamelCase__ : Union[str, Any] = 13 lowerCamelCase__ : Any = 7 lowerCamelCase__ : int = True lowerCamelCase__ : Optional[Any] = True lowerCamelCase__ : Dict = True lowerCamelCase__ : List[str] = True lowerCamelCase__ : str = 99 lowerCamelCase__ : Dict = 384 lowerCamelCase__ : Optional[Any] = 2 lowerCamelCase__ : Optional[int] = 4 lowerCamelCase__ : Optional[Any] = 37 lowerCamelCase__ : Union[str, Any] = """gelu""" lowerCamelCase__ : int = 0.1 lowerCamelCase__ : Optional[Any] = 0.1 lowerCamelCase__ : List[Any] = 
512 lowerCamelCase__ : Optional[Any] = 16 lowerCamelCase__ : Any = 2 lowerCamelCase__ : Optional[Any] = 0.02 lowerCamelCase__ : int = 3 lowerCamelCase__ : List[str] = 4 lowerCamelCase__ : Any = 128 lowerCamelCase__ : List[Any] = 2 lowerCamelCase__ : Optional[Any] = 9 lowerCamelCase__ : Any = 1 lowerCamelCase__ : Optional[int] = None def lowerCamelCase_ ( self: List[Any] ): lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ : str = None if self.use_input_mask: lowerCamelCase__ : int = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase__ : List[str] = None if self.use_token_type_ids: lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase__ : int = None lowerCamelCase__ : Optional[Any] = None lowerCamelCase__ : Optional[Any] = None if self.use_labels: lowerCamelCase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase__ : Any = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ : List[Any] = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCamelCase__ , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: Dict , UpperCamelCase__: List[str] , UpperCamelCase__: Any , UpperCamelCase__: Any , UpperCamelCase__: Any , 
UpperCamelCase__: str , UpperCamelCase__: Any ): lowerCamelCase__ : List[Any] = TFConvBertModel(config=UpperCamelCase__ ) lowerCamelCase__ : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} lowerCamelCase__ : List[str] = [input_ids, input_mask] lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ ) lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase_ ( self: Any , UpperCamelCase__: Tuple , UpperCamelCase__: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple ): lowerCamelCase__ : int = TFConvBertForMaskedLM(config=UpperCamelCase__ ) lowerCamelCase__ : Tuple = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } lowerCamelCase__ : int = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase_ ( self: str , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Any , UpperCamelCase__: Any , UpperCamelCase__: Union[str, Any] ): lowerCamelCase__ : int = self.num_labels lowerCamelCase__ : Dict = TFConvBertForSequenceClassification(config=UpperCamelCase__ ) lowerCamelCase__ : Dict = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[str] , UpperCamelCase__: int , UpperCamelCase__: List[str] , 
UpperCamelCase__: Dict ): lowerCamelCase__ : Optional[int] = self.num_choices lowerCamelCase__ : Dict = TFConvBertForMultipleChoice(config=UpperCamelCase__ ) lowerCamelCase__ : int = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) ) lowerCamelCase__ : List[str] = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) ) lowerCamelCase__ : Any = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) ) lowerCamelCase__ : Tuple = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase_ ( self: str , UpperCamelCase__: Any , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: Any , UpperCamelCase__: Optional[int] , UpperCamelCase__: str , UpperCamelCase__: int ): lowerCamelCase__ : List[Any] = self.num_labels lowerCamelCase__ : List[str] = TFConvBertForTokenClassification(config=UpperCamelCase__ ) lowerCamelCase__ : Optional[Any] = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } lowerCamelCase__ : Tuple = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase_ ( self: Any , UpperCamelCase__: List[Any] , UpperCamelCase__: str , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any] ): lowerCamelCase__ : Optional[int] = TFConvBertForQuestionAnswering(config=UpperCamelCase__ ) lowerCamelCase__ : int = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ ) 
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase_ ( self: Optional[Any] ): lowerCamelCase__ : str = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) : str = config_and_inputs lowerCamelCase__ : Optional[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class _lowercase ( _lowercase , _lowercase , unittest.TestCase ): a = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) a = ( { """feature-extraction""": TFConvBertModel, """fill-mask""": TFConvBertForMaskedLM, """question-answering""": TFConvBertForQuestionAnswering, """text-classification""": TFConvBertForSequenceClassification, """token-classification""": TFConvBertForTokenClassification, """zero-shot""": TFConvBertForSequenceClassification, } if is_tf_available() else {} ) a = False a = False a = False def lowerCamelCase_ ( self: str ): lowerCamelCase__ : Dict = TFConvBertModelTester(self ) lowerCamelCase__ : Dict = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 ) def lowerCamelCase_ ( self: List[str] ): self.config_tester.run_common_tests() def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ : 
int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ ) def lowerCamelCase_ ( self: str ): lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ ) @slow def lowerCamelCase_ ( self: Optional[Any] ): lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ : Dict = True lowerCamelCase__ : Tuple = True if hasattr(UpperCamelCase__ , """use_cache""" ): lowerCamelCase__ : Union[str, Any] = True lowerCamelCase__ : List[str] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length ) lowerCamelCase__ : Tuple = getattr(self.model_tester , """key_length""" , UpperCamelCase__ ) for model_class in self.all_model_classes: lowerCamelCase__ : int = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ ) lowerCamelCase__ : Dict = len(model(UpperCamelCase__ ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase__ , saved_model=UpperCamelCase__ ) lowerCamelCase__ : int = os.path.join(UpperCamelCase__ , """saved_model""" , """1""" ) lowerCamelCase__ : List[Any] = tf.keras.models.load_model(UpperCamelCase__ ) lowerCamelCase__ : Any = model(UpperCamelCase__ ) if self.is_encoder_decoder: lowerCamelCase__ : Dict = outputs["""encoder_hidden_states"""] lowerCamelCase__ : Any = outputs["""encoder_attentions"""] else: lowerCamelCase__ : int = 
outputs["""hidden_states"""] lowerCamelCase__ : Optional[int] = outputs["""attentions"""] self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = getattr( self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def lowerCamelCase_ ( self: int ): lowerCamelCase__ : Union[str, Any] = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" ) self.assertIsNotNone(UpperCamelCase__ ) def lowerCamelCase_ ( self: List[str] ): lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ : Union[str, Any] = True lowerCamelCase__ : int = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length ) lowerCamelCase__ : Any = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length ) lowerCamelCase__ : Optional[int] = getattr(self.model_tester , """key_length""" , UpperCamelCase__ ) lowerCamelCase__ : List[Any] = getattr(self.model_tester , """key_length""" , UpperCamelCase__ ) def check_decoder_attentions_output(UpperCamelCase__: Union[str, Any] ): lowerCamelCase__ : List[Any] = len(UpperCamelCase__ ) self.assertEqual(out_len % 2 , 0 ) lowerCamelCase__ : Any = outputs.decoder_attentions self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def 
check_encoder_attentions_output(UpperCamelCase__: List[str] ): lowerCamelCase__ : Any = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: lowerCamelCase__ : int = True lowerCamelCase__ : Any = False lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ ) lowerCamelCase__ : List[str] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) lowerCamelCase__ : Optional[int] = len(UpperCamelCase__ ) self.assertEqual(config.output_hidden_states , UpperCamelCase__ ) check_encoder_attentions_output(UpperCamelCase__ ) if self.is_encoder_decoder: lowerCamelCase__ : str = model_class(UpperCamelCase__ ) lowerCamelCase__ : Tuple = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) self.assertEqual(config.output_hidden_states , UpperCamelCase__ ) check_decoder_attentions_output(UpperCamelCase__ ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] lowerCamelCase__ : Optional[int] = True lowerCamelCase__ : Dict = model_class(UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) self.assertEqual(config.output_hidden_states , UpperCamelCase__ ) check_encoder_attentions_output(UpperCamelCase__ ) # Check attention is always last and order is fine lowerCamelCase__ : List[Any] = True lowerCamelCase__ : int = True lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ ) lowerCamelCase__ : List[Any] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCamelCase__ ) ) 
self.assertEqual(model.config.output_hidden_states , UpperCamelCase__ ) check_encoder_attentions_output(UpperCamelCase__ ) @require_tf class _lowercase ( unittest.TestCase ): @slow def lowerCamelCase_ ( self: List[str] ): lowerCamelCase__ : Dict = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" ) lowerCamelCase__ : Optional[Any] = tf.constant([[0, 1, 2, 3, 4, 5]] ) lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ )[0] lowerCamelCase__ : Dict = [1, 6, 768] self.assertEqual(output.shape , UpperCamelCase__ ) lowerCamelCase__ : Dict = tf.constant( [ [ [-0.03_475_493, -0.4_686_034, -0.30_638_832], [0.22_637_248, -0.26_988_646, -0.7_423_424], [0.10_324_868, -0.45_013_508, -0.58_280_784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , UpperCamelCase__ , atol=1e-4 )
631
"""RoCBERT model configuration.

Reconstructed from an identifier-mangled dump: the class inherited from
itself (``class _lowercase(_lowercase)``) even though ``PretrainedConfig``
is imported, the ``__init__`` signature repeated one parameter name (a
SyntaxError), and every ``self.x = x`` assignment had been turned into a
discarded local. Names are restored from the values the body stores.
"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# The mangled source assigned both of these to the same name `_A`, so the
# logger was clobbered by the archive map; they are given distinct names here.
ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    """Configuration for RoCBERT: BERT hyper-parameters plus the extra
    pronunciation/shape embedding tables RoCBERT adds."""

    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        # RoCBERT-specific: auxiliary pronunciation/shape embeddings and
        # whether their outputs are concatenated onto the word embedding.
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        # Parent handles pad_token_id and all remaining generic kwargs.
        super().__init__(pad_token_id=pad_token_id, **kwargs)
631
1
"""Distributional-output helpers for time-series models.

Reconstructed from an identifier-mangled dump: duplicate parameter names
(a SyntaxError), ``self.x = v`` turned into discarded locals,
``nn.Linear(in_features, dim)`` collapsed to a single repeated name, and
class-attribute annotations replaced with ``a = 42``. Names restored from
the attributes the bodies read (``self.base_dist``, ``self.proj``, ...).
"""

from typing import Callable, Dict, Optional, Tuple

import torch
from torch import nn
from torch.distributions import (
    AffineTransform,
    Distribution,
    Independent,
    NegativeBinomial,
    Normal,
    StudentT,
    TransformedDistribution,
)


class AffineTransformed(TransformedDistribution):
    """Wraps a base distribution with an affine map ``y = loc + scale * x``."""

    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the affinely transformed distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance scales with the square of ``scale``."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    """Projects a feature vector to one unconstrained tensor per distribution argument."""

    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        # One linear head per distribution argument (e.g. loc, scale).
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        # domain_map constrains each raw projection to its valid domain.
        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    """Wraps an arbitrary callable so it can live inside an nn.Module graph."""

    def __init__(self, function) -> None:
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    """Base class mapping network outputs to a torch distribution."""

    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        # Scale each argument's size by the output dimension.
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args) -> Distribution:
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        """Build the (optionally affinely rescaled) output distribution."""
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        """Shape of each individual event contemplated by the distribution."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        """Number of event dimensions (length of ``event_shape``)."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A value in the distribution's support, used to pad invalid inputs."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """Return the projection layer mapping features to distribution arguments."""
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        """Constrain raw projections to each argument's valid domain (subclasses)."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        """Smooth positivity map: (x + sqrt(x^2 + 4)) / 2; squareplus(0) == 1."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    """Student-T distribution output."""

    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)  # ensure df > 2 so the variance exists
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    """Normal distribution output."""

    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    """Negative-binomial distribution output."""

    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
631
"""Draw a Sierpinski triangle of a given recursion depth with turtle graphics.

Fixes from the mangled original: both functions were named
``SCREAMING_SNAKE_CASE_`` (the second shadowed the first) while their
bodies and the ``__main__`` block call ``get_mid``/``triangle`` and use
``my_pen``/``vertices`` — every call raised NameError. The ``turtle``
import is moved under the ``__main__`` guard so ``get_mid``/``triangle``
can be imported without a display/Tk installation.
"""
import sys


def get_mid(pa: tuple[float, float], pb: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of two 2-D points."""
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2


def triangle(
    vertexa: tuple[float, float],
    vertexa_a: tuple[float, float],
    vertexa_a_a: tuple[float, float],
    depth: int,
) -> None:
    """Draw one triangle, then recurse into its three corner sub-triangles.

    Relies on the module-global ``my_pen`` created in the ``__main__`` block.
    """
    my_pen.up()
    my_pen.goto(vertexa[0], vertexa[1])
    my_pen.down()
    my_pen.goto(vertexa_a[0], vertexa_a[1])
    my_pen.goto(vertexa_a_a[0], vertexa_a_a[1])
    my_pen.goto(vertexa[0], vertexa[1])

    if depth == 0:
        return

    # Recurse on the three corner triangles formed by the edge midpoints.
    triangle(vertexa, get_mid(vertexa, vertexa_a), get_mid(vertexa, vertexa_a_a), depth - 1)
    triangle(vertexa_a, get_mid(vertexa, vertexa_a), get_mid(vertexa_a, vertexa_a_a), depth - 1)
    triangle(vertexa_a_a, get_mid(vertexa_a_a, vertexa_a), get_mid(vertexa, vertexa_a_a), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    import turtle  # imported here so the helpers stay importable headless

    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")

    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
631
1
"""ViT-MAE pre-training script (``run_mae.py``).

Reconstructed from an identifier-mangled dump: all dataclass fields had
collapsed onto a single name ``a`` (so only the last field survived),
``__post_init__`` had lost its name, ``main``'s locals were all assigned
to discarded names (``parser``, ``ds``, ``config``, ``trainer`` were
undefined), ``preprocess_images`` never stored ``pixel_values``, and
``fp16`` had become ``fpaa``. Names restored from how they are used.
"""

import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode

import transformers
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    ViTImageProcessor,
    ViTMAEConfig,
    ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version


logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        # Build the data_files mapping consumed by load_dataset, or None when
        # neither folder was provided (fall back to the hub dataset).
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to pre-train."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )


@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )


def collate_fn(examples):
    """Stack per-example pixel tensors into one batch tensor."""
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # adapt config
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        """Apply the train transforms to a batch and store them as pixel_values."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
631
"""TensorFlow ConvBERT model tests.

Contains three pieces (the usual Hugging Face test-file shape):
  1. a model-tester helper that builds a tiny ConvBERT config plus dummy
     inputs and checks output shapes for each task head,
  2. the common model-test class wired to ``TFModelTesterMixin`` /
     ``PipelineTesterMixin``,
  3. one slow integration test against the ``YituTech/conv-bert-base`` weights.

NOTE(review): this file has been through a mechanical identifier rewrite that
damaged it.  All three classes are named ``_lowercase``; every method is named
``lowerCamelCase_`` (so within each class, later ``def``s shadow earlier ones);
results are bound to a single reused local ``lowerCamelCase__`` instead of the
``self`` attributes the other methods read (``self.batch_size`` etc.); several
signatures repeat the parameter name ``UpperCamelCase__`` (a SyntaxError); and
an annotated tuple-unpack target below is also a SyntaxError.  Per review
policy the code tokens are kept verbatim here; only documentation was added.
Each damage point is flagged in place -- confirm the original names against
upstream ``tests/models/convbert/test_modeling_tf_convbert.py`` before fixing.
"""

from __future__ import annotations

import os
import tempfile
import unittest

from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


# TF and the TF ConvBERT heads are only importable when TensorFlow is installed.
# NOTE(review): the type names used in annotations below (Dict, List, ...) are
# never imported; they only "work" because of ``from __future__ import
# annotations`` making all annotations lazy strings.
if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFConvBertForMaskedLM,
        TFConvBertForMultipleChoice,
        TFConvBertForQuestionAnswering,
        TFConvBertForSequenceClassification,
        TFConvBertForTokenClassification,
        TFConvBertModel,
    )


class _lowercase:
    """Model-tester helper: builds a small ConvBert config plus dummy inputs
    and runs per-head shape assertions (each ``create_and_check_*`` method)."""

    # NOTE(review): every constructor parameter shares the name
    # ``UpperCamelCase__`` -- duplicate argument names are a SyntaxError -- and
    # none of them is read: the body rebinds one throwaway local with
    # hard-coded values.  Presumably each line was once
    # ``self.<name> = <value>`` (parent, batch_size=13, seq_length=7,
    # is_training, use_input_mask, use_token_type_ids, use_labels,
    # vocab_size=99, hidden_size=384, ...) -- TODO confirm against upstream.
    def __init__(
        self: int,
        UpperCamelCase__: Dict,
        UpperCamelCase__: List[str] = 13,
        UpperCamelCase__: Union[str, Any] = 7,
        UpperCamelCase__: Union[str, Any] = True,
        UpperCamelCase__: List[Any] = True,
        UpperCamelCase__: Optional[Any] = True,
        UpperCamelCase__: int = True,
        UpperCamelCase__: List[Any] = 99,
        UpperCamelCase__: Tuple = 32,
        UpperCamelCase__: List[str] = 2,
        UpperCamelCase__: Optional[Any] = 4,
        UpperCamelCase__: Optional[int] = 37,
        UpperCamelCase__: Any = "gelu",
        UpperCamelCase__: Any = 0.1,
        UpperCamelCase__: int = 0.1,
        UpperCamelCase__: Optional[Any] = 512,
        UpperCamelCase__: List[str] = 16,
        UpperCamelCase__: Optional[int] = 2,
        UpperCamelCase__: Dict = 0.02,
        UpperCamelCase__: Tuple = 3,
        UpperCamelCase__: Optional[int] = 4,
        UpperCamelCase__: Union[str, Any] = None,
    ):
        # NOTE(review): ``parent`` is not a visible name here (all params are
        # ``UpperCamelCase__``) -- this line would raise NameError as written.
        lowerCamelCase__: Dict = parent
        lowerCamelCase__: Union[str, Any] = 13
        lowerCamelCase__: Any = 7
        lowerCamelCase__: int = True
        lowerCamelCase__: Optional[Any] = True
        lowerCamelCase__: Dict = True
        lowerCamelCase__: List[str] = True
        lowerCamelCase__: str = 99
        lowerCamelCase__: Dict = 384
        lowerCamelCase__: Optional[Any] = 2
        lowerCamelCase__: Optional[int] = 4
        lowerCamelCase__: Optional[Any] = 37
        lowerCamelCase__: Union[str, Any] = """gelu"""
        lowerCamelCase__: int = 0.1
        lowerCamelCase__: Optional[Any] = 0.1
        lowerCamelCase__: List[Any] = 512
        lowerCamelCase__: Optional[Any] = 16
        lowerCamelCase__: Any = 2
        lowerCamelCase__: Optional[Any] = 0.02
        lowerCamelCase__: int = 3
        lowerCamelCase__: List[str] = 4
        lowerCamelCase__: Any = 128
        lowerCamelCase__: List[Any] = 2
        lowerCamelCase__: Optional[Any] = 9
        lowerCamelCase__: Any = 1
        lowerCamelCase__: Optional[int] = None

    # prepare_config_and_inputs: builds random ids/mask/labels plus the config.
    def lowerCamelCase_(self: List[Any]):
        lowerCamelCase__: Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        # Attention mask and token-type ids are optional, gated by flags.
        lowerCamelCase__: str = None
        if self.use_input_mask:
            lowerCamelCase__: int = random_attention_mask([self.batch_size, self.seq_length])
        lowerCamelCase__: List[str] = None
        if self.use_token_type_ids:
            lowerCamelCase__: Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        # Labels for the classification / token / multiple-choice heads.
        lowerCamelCase__: int = None
        lowerCamelCase__: Optional[Any] = None
        lowerCamelCase__: Optional[Any] = None
        if self.use_labels:
            lowerCamelCase__: List[str] = ids_tensor([self.batch_size], self.type_sequence_label_size)
            lowerCamelCase__: Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            lowerCamelCase__: Any = ids_tensor([self.batch_size], self.num_choices)
        lowerCamelCase__: List[Any] = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=UpperCamelCase__,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    # create_and_check_model: base model called twice (dict input, then a
    # [ids, mask] list) and the final hidden-state shape is asserted.
    def lowerCamelCase_(
        self: Optional[int],
        UpperCamelCase__: Dict,
        UpperCamelCase__: List[str],
        UpperCamelCase__: Any,
        UpperCamelCase__: Any,
        UpperCamelCase__: Any,
        UpperCamelCase__: str,
        UpperCamelCase__: Any,
    ):
        lowerCamelCase__: List[Any] = TFConvBertModel(config=UpperCamelCase__)
        lowerCamelCase__: str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        lowerCamelCase__: List[str] = [input_ids, input_mask]
        lowerCamelCase__: Optional[int] = model(UpperCamelCase__)
        lowerCamelCase__: Optional[Any] = model(UpperCamelCase__)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    # create_and_check_for_masked_lm: logits should be (batch, seq, vocab).
    def lowerCamelCase_(
        self: Any,
        UpperCamelCase__: Tuple,
        UpperCamelCase__: List[Any],
        UpperCamelCase__: List[Any],
        UpperCamelCase__: List[str],
        UpperCamelCase__: List[Any],
        UpperCamelCase__: Union[str, Any],
        UpperCamelCase__: Tuple,
    ):
        lowerCamelCase__: int = TFConvBertForMaskedLM(config=UpperCamelCase__)
        lowerCamelCase__: Tuple = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        lowerCamelCase__: int = model(UpperCamelCase__)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    # create_and_check_for_sequence_classification: logits (batch, num_labels).
    def lowerCamelCase_(
        self: str,
        UpperCamelCase__: List[str],
        UpperCamelCase__: Optional[int],
        UpperCamelCase__: List[Any],
        UpperCamelCase__: Optional[int],
        UpperCamelCase__: Any,
        UpperCamelCase__: Any,
        UpperCamelCase__: Union[str, Any],
    ):
        lowerCamelCase__: int = self.num_labels
        lowerCamelCase__: Dict = TFConvBertForSequenceClassification(config=UpperCamelCase__)
        lowerCamelCase__: Dict = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        lowerCamelCase__: Optional[Any] = model(UpperCamelCase__)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    # create_and_check_for_multiple_choice: inputs are tiled across the choice
    # axis with tf.tile/expand_dims; logits should be (batch, num_choices).
    def lowerCamelCase_(
        self: Optional[Any],
        UpperCamelCase__: List[str],
        UpperCamelCase__: Tuple,
        UpperCamelCase__: Optional[int],
        UpperCamelCase__: List[str],
        UpperCamelCase__: int,
        UpperCamelCase__: List[str],
        UpperCamelCase__: Dict,
    ):
        lowerCamelCase__: Optional[int] = self.num_choices
        lowerCamelCase__: Dict = TFConvBertForMultipleChoice(config=UpperCamelCase__)
        lowerCamelCase__: int = tf.tile(tf.expand_dims(UpperCamelCase__, 1), (1, self.num_choices, 1))
        lowerCamelCase__: List[str] = tf.tile(tf.expand_dims(UpperCamelCase__, 1), (1, self.num_choices, 1))
        lowerCamelCase__: Any = tf.tile(tf.expand_dims(UpperCamelCase__, 1), (1, self.num_choices, 1))
        lowerCamelCase__: Tuple = {
            """input_ids""": multiple_choice_inputs_ids,
            """attention_mask""": multiple_choice_input_mask,
            """token_type_ids""": multiple_choice_token_type_ids,
        }
        lowerCamelCase__: Optional[Any] = model(UpperCamelCase__)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    # create_and_check_for_token_classification: logits (batch, seq, labels).
    def lowerCamelCase_(
        self: str,
        UpperCamelCase__: Any,
        UpperCamelCase__: Tuple,
        UpperCamelCase__: Optional[int],
        UpperCamelCase__: Any,
        UpperCamelCase__: Optional[int],
        UpperCamelCase__: str,
        UpperCamelCase__: int,
    ):
        lowerCamelCase__: List[Any] = self.num_labels
        lowerCamelCase__: List[str] = TFConvBertForTokenClassification(config=UpperCamelCase__)
        lowerCamelCase__: Optional[Any] = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        lowerCamelCase__: Tuple = model(UpperCamelCase__)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    # create_and_check_for_question_answering: start/end logits (batch, seq).
    def lowerCamelCase_(
        self: Any,
        UpperCamelCase__: List[Any],
        UpperCamelCase__: str,
        UpperCamelCase__: Tuple,
        UpperCamelCase__: Optional[int],
        UpperCamelCase__: Optional[Any],
        UpperCamelCase__: List[str],
        UpperCamelCase__: Optional[Any],
    ):
        lowerCamelCase__: Optional[int] = TFConvBertForQuestionAnswering(config=UpperCamelCase__)
        lowerCamelCase__: int = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        lowerCamelCase__: Optional[int] = model(UpperCamelCase__)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    # prepare_config_and_inputs_for_common: repackages the tuple from
    # prepare_config_and_inputs into (config, inputs_dict).
    # NOTE(review): an annotated tuple-unpacking target (``(...) : str = ...``)
    # is a SyntaxError -- rewrite-damage, kept verbatim.
    def lowerCamelCase_(self: Optional[Any]):
        lowerCamelCase__: str = self.prepare_config_and_inputs()
        (
            (lowerCamelCase__),
            (lowerCamelCase__),
            (lowerCamelCase__),
            (lowerCamelCase__),
            (lowerCamelCase__),
            (lowerCamelCase__),
            (lowerCamelCase__),
        ): str = config_and_inputs
        lowerCamelCase__: Optional[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict


@require_tf
class _lowercase(_lowercase, _lowercase, unittest.TestCase):
    """Common TF ConvBERT test suite (model-tester mixin style).

    NOTE(review): both base-class slots name ``_lowercase`` -- at definition
    time that resolves to the tester class above twice, not to
    ``TFModelTesterMixin``/``PipelineTesterMixin`` as the imports suggest.
    Likewise all five class attributes below are bound to the same name ``a``
    (later assignments shadow earlier ones); presumably these were
    ``all_model_classes``, ``pipeline_model_mapping`` and three boolean test
    flags -- TODO confirm against upstream.
    """

    # All model classes exercised by the shared tests (only when TF is present).
    a = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline-task -> model-class mapping for the pipeline tests.
    a = (
        {
            """feature-extraction""": TFConvBertModel,
            """fill-mask""": TFConvBertForMaskedLM,
            """question-answering""": TFConvBertForQuestionAnswering,
            """text-classification""": TFConvBertForSequenceClassification,
            """token-classification""": TFConvBertForTokenClassification,
            """zero-shot""": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    a = False
    a = False
    a = False

    # setUp: builds the model tester and a ConfigTester.
    # NOTE(review): ``TFConvBertModelTester`` is not defined anywhere in this
    # file (the tester class was renamed ``_lowercase``), and ``UpperCamelCase__``
    # is used as a value without being in scope -- rewrite damage.
    def lowerCamelCase_(self: str):
        lowerCamelCase__: Dict = TFConvBertModelTester(self)
        lowerCamelCase__: Dict = ConfigTester(self, config_class=UpperCamelCase__, hidden_size=37)

    # test_config
    def lowerCamelCase_(self: List[str]):
        self.config_tester.run_common_tests()

    # test_model
    def lowerCamelCase_(self: Union[str, Any]):
        lowerCamelCase__: List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__)

    # test_for_masked_lm
    def lowerCamelCase_(self: Any):
        lowerCamelCase__: Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__)

    # test_for_multiple_choice
    def lowerCamelCase_(self: Tuple):
        lowerCamelCase__: int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__)

    # test_for_question_answering
    def lowerCamelCase_(self: Union[str, Any]):
        lowerCamelCase__: int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__)

    # test_for_sequence_classification
    def lowerCamelCase_(self: Any):
        lowerCamelCase__: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__)

    # test_for_token_classification
    def lowerCamelCase_(self: str):
        lowerCamelCase__: Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__)

    # test_saved_model_creation_extended (slow): saves each model as a TF
    # SavedModel, reloads it with tf.keras, and checks that hidden-state and
    # attention outputs keep the expected counts and shapes.  Note the
    # attention-head dimension is compared against num_attention_heads / 2 --
    # ConvBERT splits heads between self-attention and its convolution branch.
    @slow
    def lowerCamelCase_(self: Optional[Any]):
        lowerCamelCase__, lowerCamelCase__: List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__: Dict = True
        lowerCamelCase__: Tuple = True
        if hasattr(UpperCamelCase__, """use_cache"""):
            lowerCamelCase__: Union[str, Any] = True
        lowerCamelCase__: List[str] = getattr(self.model_tester, """encoder_seq_length""", self.model_tester.seq_length)
        lowerCamelCase__: Tuple = getattr(self.model_tester, """key_length""", UpperCamelCase__)
        for model_class in self.all_model_classes:
            lowerCamelCase__: int = self._prepare_for_class(UpperCamelCase__, UpperCamelCase__)
            lowerCamelCase__: List[Any] = model_class(UpperCamelCase__)
            lowerCamelCase__: Dict = len(model(UpperCamelCase__))
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(UpperCamelCase__, saved_model=UpperCamelCase__)
                # SavedModel layout places the servable under saved_model/1.
                lowerCamelCase__: int = os.path.join(UpperCamelCase__, """saved_model""", """1""")
                lowerCamelCase__: List[Any] = tf.keras.models.load_model(UpperCamelCase__)
                lowerCamelCase__: Any = model(UpperCamelCase__)
                if self.is_encoder_decoder:
                    lowerCamelCase__: Dict = outputs["""encoder_hidden_states"""]
                    lowerCamelCase__: Any = outputs["""encoder_attentions"""]
                else:
                    lowerCamelCase__: int = outputs["""hidden_states"""]
                    lowerCamelCase__: Optional[int] = outputs["""attentions"""]
                self.assertEqual(len(UpperCamelCase__), UpperCamelCase__)
                # embeddings + one hidden state per layer by default.
                lowerCamelCase__: Union[str, Any] = getattr(
                    self.model_tester, """expected_num_hidden_layers""", self.model_tester.num_hidden_layers + 1
                )
                self.assertEqual(len(UpperCamelCase__), UpperCamelCase__)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )
                self.assertEqual(len(UpperCamelCase__), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    # test_model_from_pretrained (slow): smoke-test loading public weights.
    @slow
    def lowerCamelCase_(self: int):
        lowerCamelCase__: Union[str, Any] = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""")
        self.assertIsNotNone(UpperCamelCase__)

    # test_attention_outputs: checks attention tensors are returned (and have
    # the right count/shape) whether requested per-call or via the config,
    # and that they stay last in the output tuple.
    def lowerCamelCase_(self: List[str]):
        lowerCamelCase__, lowerCamelCase__: Any = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__: Union[str, Any] = True
        lowerCamelCase__: int = getattr(self.model_tester, """decoder_seq_length""", self.model_tester.seq_length)
        lowerCamelCase__: Any = getattr(self.model_tester, """encoder_seq_length""", self.model_tester.seq_length)
        lowerCamelCase__: Optional[int] = getattr(self.model_tester, """key_length""", UpperCamelCase__)
        lowerCamelCase__: List[Any] = getattr(self.model_tester, """key_length""", UpperCamelCase__)

        # Helper: assert the decoder attention tensors' count and shape.
        def check_decoder_attentions_output(UpperCamelCase__: Union[str, Any]):
            lowerCamelCase__: List[Any] = len(UpperCamelCase__)
            self.assertEqual(out_len % 2, 0)
            lowerCamelCase__: Any = outputs.decoder_attentions
            self.assertEqual(len(UpperCamelCase__), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        # Helper: assert the encoder/self attention tensors' count and shape.
        def check_encoder_attentions_output(UpperCamelCase__: List[str]):
            lowerCamelCase__: Any = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(UpperCamelCase__), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            lowerCamelCase__: int = True
            lowerCamelCase__: Any = False
            lowerCamelCase__: Union[str, Any] = model_class(UpperCamelCase__)
            lowerCamelCase__: List[str] = model(self._prepare_for_class(UpperCamelCase__, UpperCamelCase__))
            lowerCamelCase__: Optional[int] = len(UpperCamelCase__)
            self.assertEqual(config.output_hidden_states, UpperCamelCase__)
            check_encoder_attentions_output(UpperCamelCase__)
            if self.is_encoder_decoder:
                lowerCamelCase__: str = model_class(UpperCamelCase__)
                lowerCamelCase__: Tuple = model(self._prepare_for_class(UpperCamelCase__, UpperCamelCase__))
                self.assertEqual(config.output_hidden_states, UpperCamelCase__)
                check_decoder_attentions_output(UpperCamelCase__)
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            lowerCamelCase__: Optional[int] = True
            lowerCamelCase__: Dict = model_class(UpperCamelCase__)
            lowerCamelCase__: Union[str, Any] = model(self._prepare_for_class(UpperCamelCase__, UpperCamelCase__))
            self.assertEqual(config.output_hidden_states, UpperCamelCase__)
            check_encoder_attentions_output(UpperCamelCase__)
            # Check attention is always last and order is fine
            lowerCamelCase__: List[Any] = True
            lowerCamelCase__: int = True
            lowerCamelCase__: List[Any] = model_class(UpperCamelCase__)
            lowerCamelCase__: List[Any] = model(self._prepare_for_class(UpperCamelCase__, UpperCamelCase__))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(UpperCamelCase__))
            self.assertEqual(model.config.output_hidden_states, UpperCamelCase__)
            check_encoder_attentions_output(UpperCamelCase__)


@require_tf
class _lowercase(unittest.TestCase):
    """Slow integration test against the public ``YituTech/conv-bert-base``
    checkpoint: runs a 6-token input and compares the first 3x3 slice of the
    hidden states to a stored golden tensor within atol=1e-4."""

    @slow
    def lowerCamelCase_(self: List[str]):
        lowerCamelCase__: Dict = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""")
        lowerCamelCase__: Optional[Any] = tf.constant([[0, 1, 2, 3, 4, 5]])
        lowerCamelCase__: Optional[Any] = model(UpperCamelCase__)[0]
        # Expected output shape: (batch=1, seq_len=6, hidden_size=768).
        lowerCamelCase__: Dict = [1, 6, 768]
        self.assertEqual(output.shape, UpperCamelCase__)
        # Golden values recorded from the reference checkpoint.
        lowerCamelCase__: Dict = tf.constant(
            [
                [
                    [-0.03_475_493, -0.4_686_034, -0.30_638_832],
                    [0.22_637_248, -0.26_988_646, -0.7_423_424],
                    [0.10_324_868, -0.45_013_508, -0.58_280_784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], UpperCamelCase__, atol=1e-4)
631
1