Dataset schema (per-row fields, with observed value ranges):

    code                     string   length 86 to 54.5k
    code_codestyle           int64    0 to 371
    style_context            string   length 87 to 49.2k
    style_context_codestyle  int64    0 to 349
    label                    int64    0 to 1
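For orientation, a dataset with this schema can be loaded and inspected with the Hugging Face datasets library. The sketch below is illustrative only: the repository id "user/code-style-pairs" is a placeholder assumption, not the dataset's actual name.

# Minimal sketch, assuming the dataset is hosted on the Hugging Face Hub.
# "user/code-style-pairs" is a hypothetical repo id, not the real dataset name.
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")  # assumed repo id and split
row = ds[0]
print(len(row["code"]), row["code_codestyle"])                     # code text and its style id
print(len(row["style_context"]), row["style_context_codestyle"])  # context text and its style id
print(row["label"])                                               # binary label: 0 or 1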
Row 1

code:

from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

A__ = logging.get_logger(__name__)

A__ = {
    """sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}

class __lowerCAmelCase ( lowerCamelCase__ ):
    __lowerCamelCase = '''poolformer'''

    def __init__( self , _snake_case=3 , _snake_case=16 , _snake_case=16 , _snake_case=3 , _snake_case=4.0 , _snake_case=[2, 2, 6, 2] , _snake_case=[64, 128, 320, 512] , _snake_case=[7, 3, 3, 3] , _snake_case=[4, 2, 2, 2] , _snake_case=[2, 1, 1, 1] , _snake_case=4 , _snake_case=0.0 , _snake_case="gelu" , _snake_case=True , _snake_case=1e-5 , _snake_case=0.02 , **_snake_case , ):
        """simple docstring"""
        _lowerCAmelCase = num_channels
        _lowerCAmelCase = patch_size
        _lowerCAmelCase = stride
        _lowerCAmelCase = padding
        _lowerCAmelCase = pool_size
        _lowerCAmelCase = hidden_sizes
        _lowerCAmelCase = mlp_ratio
        _lowerCAmelCase = depths
        _lowerCAmelCase = patch_sizes
        _lowerCAmelCase = strides
        _lowerCAmelCase = num_encoder_blocks
        _lowerCAmelCase = drop_path_rate
        _lowerCAmelCase = hidden_act
        _lowerCAmelCase = use_layer_scale
        _lowerCAmelCase = layer_scale_init_value
        _lowerCAmelCase = initializer_range
        super().__init__(**_snake_case )

class __lowerCAmelCase ( lowerCamelCase__ ):
    __lowerCamelCase = version.parse('''1.11''' )

    @property
    def snake_case ( self ):
        """simple docstring"""
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )

    @property
    def snake_case ( self ):
        """simple docstring"""
        return 2e-3

code_codestyle: 82
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class A__ ( _lowerCamelCase , unittest.TestCase): A_ : str = ShapEImgaImgPipeline A_ : str = ['image'] A_ : int = ['image'] A_ : Tuple = [ 'num_images_per_prompt', 'num_inference_steps', 'generator', 'latents', 'guidance_scale', 'frame_size', 'output_type', 'return_dict', ] A_ : Tuple = False @property def __lowerCamelCase ( self ): return 32 @property def __lowerCamelCase ( self ): return 32 @property def __lowerCamelCase ( self ): return self.time_input_dim * 4 @property def __lowerCamelCase ( self ): return 8 @property def __lowerCamelCase ( self ): torch.manual_seed(0 ) __lowerCAmelCase : Any = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) __lowerCAmelCase : Tuple = CLIPVisionModel(_SCREAMING_SNAKE_CASE ) return model @property def __lowerCamelCase ( self ): __lowerCAmelCase : Any = CLIPImageProcessor( crop_size=2_24 , do_center_crop=_SCREAMING_SNAKE_CASE , do_normalize=_SCREAMING_SNAKE_CASE , do_resize=_SCREAMING_SNAKE_CASE , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_24 , ) return image_processor @property def __lowerCamelCase ( self ): torch.manual_seed(0 ) __lowerCAmelCase : Optional[Any] = { 'num_attention_heads': 2, 'attention_head_dim': 16, 'embedding_dim': self.time_input_dim, 'num_embeddings': 32, 'embedding_proj_dim': self.text_embedder_hidden_size, 'time_embed_dim': self.time_embed_dim, 'num_layers': 1, 'clip_embed_dim': self.time_input_dim * 2, 'additional_embeddings': 0, 'time_embed_act_fn': 'gelu', 'norm_in_type': 'layer', 'embedding_proj_norm_type': 'layer', 'encoder_hid_proj_type': None, 'added_emb_type': None, } __lowerCAmelCase : List[Any] = PriorTransformer(**_SCREAMING_SNAKE_CASE ) return model @property def __lowerCamelCase ( self ): torch.manual_seed(0 ) __lowerCAmelCase : Dict = { 'param_shapes': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), 'd_latent': self.time_input_dim, 'd_hidden': self.renderer_dim, 'n_output': 12, 'background': ( 0.1, 0.1, 0.1, ), } __lowerCAmelCase : int = ShapERenderer(**_SCREAMING_SNAKE_CASE ) return model def __lowerCamelCase ( self ): __lowerCAmelCase : Any = self.dummy_prior __lowerCAmelCase : List[Any] = self.dummy_image_encoder __lowerCAmelCase : int = self.dummy_image_processor __lowerCAmelCase : Any = self.dummy_renderer __lowerCAmelCase : Any = HeunDiscreteScheduler( beta_schedule='exp' , num_train_timesteps=10_24 , prediction_type='sample' , use_karras_sigmas=_SCREAMING_SNAKE_CASE , clip_sample=_SCREAMING_SNAKE_CASE , clip_sample_range=1.0 , ) __lowerCAmelCase : Tuple = { 'prior': prior, 'image_encoder': image_encoder, 'image_processor': image_processor, 'renderer': renderer, 'scheduler': scheduler, } return components def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE=0 ): __lowerCAmelCase : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE ) if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ): __lowerCAmelCase : int = torch.manual_seed(_SCREAMING_SNAKE_CASE ) else: __lowerCAmelCase : str = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = { 'image': input_image, 'generator': generator, 'num_inference_steps': 1, 'frame_size': 32, 'output_type': 'np', } return inputs def __lowerCamelCase ( self ): __lowerCAmelCase : str = 'cpu' __lowerCAmelCase : Dict = self.get_dummy_components() __lowerCAmelCase : Optional[int] = self.pipeline_class(**_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = pipe.to(_SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = pipe(**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) ) __lowerCAmelCase : Any = output.images[0] __lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __lowerCAmelCase : List[Any] = np.array( [ 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCamelCase ( self ): # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __lowerCamelCase ( self ): __lowerCAmelCase : str = torch_device == 'cpu' __lowerCAmelCase : Optional[Any] = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=_SCREAMING_SNAKE_CASE , relax_max_difference=_SCREAMING_SNAKE_CASE , ) def __lowerCamelCase ( self ): __lowerCAmelCase : str = self.get_dummy_components() __lowerCAmelCase : List[str] = self.pipeline_class(**_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = pipe.to(_SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = 1 __lowerCAmelCase : List[str] = 2 __lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) for key in inputs.keys(): if key in self.batch_params: __lowerCAmelCase : Optional[Any] = batch_size * [inputs[key]] __lowerCAmelCase : List[str] = pipe(**_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class A__ ( unittest.TestCase): def __lowerCamelCase ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCamelCase ( self ): __lowerCAmelCase : int = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' ) __lowerCAmelCase : Any = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/test_shap_e_img2img_out.npy' ) __lowerCAmelCase : Union[str, Any] = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' ) __lowerCAmelCase : Dict = pipe.to(_SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 ) __lowerCAmelCase : int = pipe( _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0] assert images.shape == (20, 64, 64, 3) 
assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
86
0
"""simple docstring""" import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs snake_case__ : Union[str, Any] = imread(R'''digital_image_processing/image_data/lena_small.jpg''') snake_case__ : Union[str, Any] = cvtColor(img, COLOR_BGR2GRAY) def _snake_case ( ): lowerCAmelCase : Optional[int] = cn.convert_to_negative(_snake_case ) # assert negative_img array for at least one True assert negative_img.any() def _snake_case ( ): with Image.open('''digital_image_processing/image_data/lena_small.jpg''' ) as img: # Work around assertion for response assert str(cc.change_contrast(_snake_case , 110 ) ).startswith( '''<PIL.Image.Image image mode=RGB size=100x100 at''' ) def _snake_case ( ): lowerCAmelCase : str = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def _snake_case ( ): lowerCAmelCase : Union[str, Any] = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0 ) # assert ambiguous array for all == True assert canny_img.all() lowerCAmelCase : Dict = canny.canny(_snake_case ) # assert canny array for at least one True assert canny_array.any() def _snake_case ( ): assert gg.gaussian_filter(_snake_case , 5 , sigma=0.9 ).all() def _snake_case ( ): # laplace diagonals lowerCAmelCase : Tuple = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) lowerCAmelCase : int = conv.img_convolve(_snake_case , _snake_case ).astype(_snake_case ) assert res.any() def _snake_case ( ): assert med.median_filter(_snake_case , 3 ).any() def _snake_case ( ): lowerCAmelCase, lowerCAmelCase : Optional[Any] = sob.sobel_filter(_snake_case ) assert grad.any() and theta.any() def _snake_case ( ): lowerCAmelCase : str = sp.make_sepia(_snake_case , 20 ) assert sepia.all() def _snake_case ( _snake_case : str = "digital_image_processing/image_data/lena_small.jpg" ): lowerCAmelCase : Tuple = bs.Burkes(imread(_snake_case , 1 ) , 120 ) burkes.process() assert burkes.output_img.any() def _snake_case ( _snake_case : str = "digital_image_processing/image_data/lena_small.jpg" , ): lowerCAmelCase : int = rs.NearestNeighbour(imread(_snake_case , 1 ) , 400 , 200 ) nn.process() assert nn.output.any() def _snake_case ( ): lowerCAmelCase : int = '''digital_image_processing/image_data/lena.jpg''' # Reading the image and converting it to grayscale. 
lowerCAmelCase : Dict = imread(_snake_case , 0 ) # Test for get_neighbors_pixel function() return not None lowerCAmelCase : List[Any] = 0 lowerCAmelCase : Any = 0 lowerCAmelCase : List[str] = image[x_coordinate][y_coordinate] lowerCAmelCase : List[Any] = lbp.get_neighbors_pixel( _snake_case , _snake_case , _snake_case , _snake_case ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image lowerCAmelCase : str = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): lowerCAmelCase : str = lbp.local_binary_value(_snake_case , _snake_case , _snake_case ) assert lbp_image.any()
314
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) snake_case__ : int = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : int = ['''PLBartTokenizer'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : int = [ '''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''', '''PLBartForCausalLM''', '''PLBartForConditionalGeneration''', '''PLBartForSequenceClassification''', '''PLBartModel''', '''PLBartPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_plbart import PLBartTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_plbart import ( PLBART_PRETRAINED_MODEL_ARCHIVE_LIST, PLBartForCausalLM, PLBartForConditionalGeneration, PLBartForSequenceClassification, PLBartModel, PLBartPreTrainedModel, ) else: import sys snake_case__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
314
1
"""simple docstring""" import itertools import random import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin UpperCAmelCase__ : Tuple = random.Random() if is_torch_available(): import torch def lowercase_ ( _snake_case ,_snake_case=1.0 ,_snake_case=None ,_snake_case=None ): if rng is None: SCREAMING_SNAKE_CASE__ : Optional[Any] = global_rng SCREAMING_SNAKE_CASE__ : List[str] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class lowerCAmelCase_ (unittest.TestCase ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=4_00 , SCREAMING_SNAKE_CASE__=20_00 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=1_60_00 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = parent SCREAMING_SNAKE_CASE__ : int = batch_size SCREAMING_SNAKE_CASE__ : Dict = min_seq_length SCREAMING_SNAKE_CASE__ : List[str] = max_seq_length SCREAMING_SNAKE_CASE__ : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) SCREAMING_SNAKE_CASE__ : Union[str, Any] = feature_size SCREAMING_SNAKE_CASE__ : Optional[Any] = padding_value SCREAMING_SNAKE_CASE__ : Any = sampling_rate SCREAMING_SNAKE_CASE__ : Optional[Any] = return_attention_mask SCREAMING_SNAKE_CASE__ : List[Any] = do_normalize def __magic_name__ (self ) -> Dict: """simple docstring""" return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def __magic_name__ (self , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False ) -> List[Any]: """simple docstring""" def _flatten(SCREAMING_SNAKE_CASE__ ): return list(itertools.chain(*SCREAMING_SNAKE_CASE__ ) ) if equal_length: SCREAMING_SNAKE_CASE__ : int = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size SCREAMING_SNAKE_CASE__ : str = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: SCREAMING_SNAKE_CASE__ : Optional[Any] = [np.asarray(SCREAMING_SNAKE_CASE__ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class lowerCAmelCase_ (a__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase : List[str] = ASTFeatureExtractor def __magic_name__ (self ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = ASTFeatureExtractionTester(self ) def __magic_name__ (self ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 SCREAMING_SNAKE_CASE__ : List[Any] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] SCREAMING_SNAKE_CASE__ : Dict = [np.asarray(SCREAMING_SNAKE_CASE__ ) for speech_input in speech_inputs] # Test not batched input SCREAMING_SNAKE_CASE__ : int = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values SCREAMING_SNAKE_CASE__ : Union[str, 
Any] = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) ) # Test batched SCREAMING_SNAKE_CASE__ : List[str] = feat_extract(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors="""np""" ).input_values SCREAMING_SNAKE_CASE__ : str = feat_extract(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors="""np""" ).input_values for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. SCREAMING_SNAKE_CASE__ : List[Any] = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] SCREAMING_SNAKE_CASE__ : Optional[int] = np.asarray(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract(SCREAMING_SNAKE_CASE__ , return_tensors="""np""" ).input_values SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract(SCREAMING_SNAKE_CASE__ , return_tensors="""np""" ).input_values for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) ) @require_torch def __magic_name__ (self ) -> Tuple: """simple docstring""" import torch SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE__ : Any = np.random.rand(1_00 ).astype(np.floataa ) SCREAMING_SNAKE_CASE__ : List[Any] = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: SCREAMING_SNAKE_CASE__ : Any = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) SCREAMING_SNAKE_CASE__ : Dict = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> List[Any]: """simple docstring""" from datasets import load_dataset SCREAMING_SNAKE_CASE__ : Optional[int] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" ) # automatic decoding with librispeech SCREAMING_SNAKE_CASE__ : Union[str, Any] = ds.sort("""id""" ).select(range(SCREAMING_SNAKE_CASE__ ) )[:num_samples]["""audio"""] return [x["array"] for x in speech_samples] @require_torch def __magic_name__ (self ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor( [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776, -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133, -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936, -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] ) # fmt: on SCREAMING_SNAKE_CASE__ : Optional[int] = self._load_datasamples(1 ) SCREAMING_SNAKE_CASE__ : int = ASTFeatureExtractor() SCREAMING_SNAKE_CASE__ : Dict = feature_extractor(SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ).input_values self.assertEquals(input_values.shape , (1, 10_24, 1_28) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
25
"""simple docstring""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : List[Any] = 384 SCREAMING_SNAKE_CASE__ : Tuple = 7 if "tiny" in model_name: SCREAMING_SNAKE_CASE__ : int = 96 SCREAMING_SNAKE_CASE__ : str = (2, 2, 6, 2) SCREAMING_SNAKE_CASE__ : List[Any] = (3, 6, 12, 24) elif "small" in model_name: SCREAMING_SNAKE_CASE__ : Union[str, Any] = 96 SCREAMING_SNAKE_CASE__ : Any = (2, 2, 18, 2) SCREAMING_SNAKE_CASE__ : Tuple = (3, 6, 12, 24) elif "base" in model_name: SCREAMING_SNAKE_CASE__ : Tuple = 128 SCREAMING_SNAKE_CASE__ : List[Any] = (2, 2, 18, 2) SCREAMING_SNAKE_CASE__ : int = (4, 8, 16, 32) SCREAMING_SNAKE_CASE__ : Optional[int] = 12 SCREAMING_SNAKE_CASE__ : Optional[int] = 512 elif "large" in model_name: SCREAMING_SNAKE_CASE__ : Optional[Any] = 192 SCREAMING_SNAKE_CASE__ : int = (2, 2, 18, 2) SCREAMING_SNAKE_CASE__ : int = (6, 12, 24, 48) SCREAMING_SNAKE_CASE__ : List[Any] = 12 SCREAMING_SNAKE_CASE__ : Optional[Any] = 768 # set label information SCREAMING_SNAKE_CASE__ : Optional[Any] = 150 SCREAMING_SNAKE_CASE__ : Tuple = """huggingface/label-files""" SCREAMING_SNAKE_CASE__ : List[str] = """ade20k-id2label.json""" SCREAMING_SNAKE_CASE__ : str = json.load(open(hf_hub_download(_snake_case ,_snake_case ,repo_type="""dataset""" ) ,"""r""" ) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = {int(_snake_case ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE__ : List[Any] = {v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE__ : str = SwinConfig( embed_dim=_snake_case ,depths=_snake_case ,num_heads=_snake_case ,window_size=_snake_case ,out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ,) SCREAMING_SNAKE_CASE__ : int = UperNetConfig( backbone_config=_snake_case ,auxiliary_in_channels=_snake_case ,num_labels=_snake_case ,idalabel=_snake_case ,labelaid=_snake_case ,) return config def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : Optional[Any] = [] # fmt: off # stem rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', 
f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') ) rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') ) # decode head rename_keys.extend( [ ("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""), ("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""), ("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""), ("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""), ] ) # fmt: on return rename_keys def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : Optional[Any] = dct.pop(_snake_case ) SCREAMING_SNAKE_CASE__ : Tuple = val def lowercase_ ( _snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : int = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE__ : List[Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE__ : Tuple = in_proj_weight[:dim, :] SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[: dim] SCREAMING_SNAKE_CASE__ : Optional[int] = in_proj_weight[ dim : dim * 2, : ] SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[ dim : dim * 2 ] SCREAMING_SNAKE_CASE__ : Tuple = in_proj_weight[ -dim :, : ] SCREAMING_SNAKE_CASE__ : Optional[Any] = in_proj_bias[-dim :] # fmt: on def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = x.shape SCREAMING_SNAKE_CASE__ : List[Any] = 
x.reshape(_snake_case ,4 ,in_channel // 4 ) SCREAMING_SNAKE_CASE__ : Dict = x[:, [0, 2, 1, 3], :].transpose(1 ,2 ).reshape(_snake_case ,_snake_case ) return x def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = x.shape SCREAMING_SNAKE_CASE__ : Any = x.reshape(_snake_case ,in_channel // 4 ,4 ) SCREAMING_SNAKE_CASE__ : Optional[Any] = x[:, :, [0, 2, 1, 3]].transpose(1 ,2 ).reshape(_snake_case ,_snake_case ) return x def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : Tuple = x.shape[0] SCREAMING_SNAKE_CASE__ : List[str] = x.reshape(4 ,in_channel // 4 ) SCREAMING_SNAKE_CASE__ : Optional[Any] = x[[0, 2, 1, 3], :].transpose(0 ,1 ).reshape(_snake_case ) return x def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : int = x.shape[0] SCREAMING_SNAKE_CASE__ : List[str] = x.reshape(in_channel // 4 ,4 ) SCREAMING_SNAKE_CASE__ : Tuple = x[:, [0, 2, 1, 3]].transpose(0 ,1 ).reshape(_snake_case ) return x def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : List[Any] = { """upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""", """upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""", """upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""", """upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""", } SCREAMING_SNAKE_CASE__ : Optional[int] = model_name_to_url[model_name] SCREAMING_SNAKE_CASE__ : Optional[int] = torch.hub.load_state_dict_from_url(_snake_case ,map_location="""cpu""" ,file_name=_snake_case )[ """state_dict""" ] for name, param in state_dict.items(): print(_snake_case ,param.shape ) SCREAMING_SNAKE_CASE__ : Optional[Any] = get_upernet_config(_snake_case ) SCREAMING_SNAKE_CASE__ : List[str] = UperNetForSemanticSegmentation(_snake_case ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): SCREAMING_SNAKE_CASE__ : Optional[int] = state_dict.pop(_snake_case ) if "bn" in key: SCREAMING_SNAKE_CASE__ : Optional[int] = key.replace("""bn""" ,"""batch_norm""" ) SCREAMING_SNAKE_CASE__ : Dict = val # rename keys SCREAMING_SNAKE_CASE__ : str = create_rename_keys(_snake_case ) for src, dest in rename_keys: rename_key(_snake_case ,_snake_case ,_snake_case ) read_in_q_k_v(_snake_case ,config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: SCREAMING_SNAKE_CASE__ : Union[str, Any] = reverse_correct_unfold_reduction_order(_snake_case ) if "norm" in key: SCREAMING_SNAKE_CASE__ : Tuple = reverse_correct_unfold_norm_order(_snake_case ) model.load_state_dict(_snake_case ) # verify on image SCREAMING_SNAKE_CASE__ : List[str] = 
"""https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg""" SCREAMING_SNAKE_CASE__ : str = Image.open(requests.get(_snake_case ,stream=_snake_case ).raw ).convert("""RGB""" ) SCREAMING_SNAKE_CASE__ : Optional[Any] = SegformerImageProcessor() SCREAMING_SNAKE_CASE__ : Optional[int] = processor(_snake_case ,return_tensors="""pt""" ).pixel_values with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Tuple = model(_snake_case ) SCREAMING_SNAKE_CASE__ : List[Any] = outputs.logits print(logits.shape ) print("""First values of logits:""" ,logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor( [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ) elif model_name == "upernet-swin-small": SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor( [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] ) elif model_name == "upernet-swin-base": SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor( [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] ) elif model_name == "upernet-swin-large": SCREAMING_SNAKE_CASE__ : Dict = torch.tensor( [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] ) print("""Logits:""" ,outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] ,_snake_case ,atol=1E-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_snake_case ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(_snake_case ) if push_to_hub: print(f'''Pushing model and processor for {model_name} to hub''' ) model.push_to_hub(f'''openmmlab/{model_name}''' ) processor.push_to_hub(f'''openmmlab/{model_name}''' ) if __name__ == "__main__": UpperCAmelCase__ : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='upernet-swin-tiny', type=str, choices=[f"""upernet-swin-{size}""" for size in ['tiny', 'small', 'base', 'large']], help='Name of the Swin + UperNet model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) UpperCAmelCase__ : List[str] = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
25
1
Row 4

code:

from __future__ import annotations

import math

def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> bool:
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(lowerCAmelCase__ ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

__lowerCAmelCase : List[str] = [num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)]

def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> list[int]:
    '''simple docstring'''
    if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
        raise ValueError("""n must be an integer""" )
    if n <= 0:
        raise ValueError("""n must be >= 0""" )
    lowercase = []
    for num in range(len(lowerCAmelCase__ ) ):
        lowercase = 0
        while 2 * i * i <= odd_composites[num]:
            lowercase = odd_composites[num] - 2 * i * i
            if is_prime(lowerCAmelCase__ ):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num] )
        if len(lowerCAmelCase__ ) == n:
            return list_nums
    return []

def UpperCAmelCase__ ( ) -> int:
    '''simple docstring'''
    return compute_nums(1 )[0]

if __name__ == "__main__":
    print(F"""{solution() = }""")

code_codestyle: 371
"""simple docstring""" from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING __lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__) @add_end_docstrings(lowerCAmelCase ) class _A ( lowerCAmelCase ): def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" super().__init__(*__lowerCAmelCase , **__lowerCAmelCase ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ) def A__ ( self , __lowerCAmelCase=None ): """simple docstring""" lowercase = {} if top_k is not None: lowercase = top_k return {}, {}, postprocess_params def __call__( self , __lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" return super().__call__(__lowerCAmelCase , **__lowerCAmelCase ) def A__ ( self , __lowerCAmelCase ): """simple docstring""" lowercase = load_image(__lowerCAmelCase ) lowercase = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework ) return model_inputs def A__ ( self , __lowerCAmelCase ): """simple docstring""" lowercase = self.model(**__lowerCAmelCase ) return model_outputs def A__ ( self , __lowerCAmelCase , __lowerCAmelCase=5 ): """simple docstring""" if top_k > self.model.config.num_labels: lowercase = self.model.config.num_labels if self.framework == "pt": lowercase = model_outputs.logits.softmax(-1 )[0] lowercase , lowercase = probs.topk(__lowerCAmelCase ) elif self.framework == "tf": lowercase = stable_softmax(model_outputs.logits , axis=-1 )[0] lowercase = tf.math.top_k(__lowerCAmelCase , k=__lowerCAmelCase ) lowercase , lowercase = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(f'Unsupported framework: {self.framework}' ) lowercase = scores.tolist() lowercase = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(__lowerCAmelCase , __lowerCAmelCase )]
32
0
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VivitImageProcessor class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self , A , A=7 , A=3 , A=1_0 , A=1_8 , A=3_0 , A=4_0_0 , A=True , A=None , A=True , A=[0.5, 0.5, 0.5] , A=[0.5, 0.5, 0.5] , A=None , ) -> Dict: _UpperCAmelCase : Any = size if size is not None else {'''shortest_edge''': 1_8} _UpperCAmelCase : Optional[int] = crop_size if crop_size is not None else {'''height''': 1_8, '''width''': 1_8} _UpperCAmelCase : Tuple = parent _UpperCAmelCase : str = batch_size _UpperCAmelCase : Any = num_channels _UpperCAmelCase : Optional[Any] = num_frames _UpperCAmelCase : Dict = image_size _UpperCAmelCase : List[Any] = min_resolution _UpperCAmelCase : Dict = max_resolution _UpperCAmelCase : Any = do_resize _UpperCAmelCase : Tuple = size _UpperCAmelCase : Tuple = do_normalize _UpperCAmelCase : Optional[int] = image_mean _UpperCAmelCase : int = image_std _UpperCAmelCase : Dict = crop_size def __lowerCAmelCase ( self ) -> Optional[Any]: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class _UpperCAmelCase ( a ,unittest.TestCase ): '''simple docstring''' a__ =VivitImageProcessor if is_vision_available() else None def __lowerCAmelCase ( self ) -> Union[str, Any]: _UpperCAmelCase : List[str] = VivitImageProcessingTester(self ) @property def __lowerCAmelCase ( self ) -> Any: return self.image_processor_tester.prepare_image_processor_dict() def __lowerCAmelCase ( self ) -> int: _UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A , '''image_mean''' ) ) self.assertTrue(hasattr(A , '''image_std''' ) ) self.assertTrue(hasattr(A , '''do_normalize''' ) ) self.assertTrue(hasattr(A , '''do_resize''' ) ) self.assertTrue(hasattr(A , '''do_center_crop''' ) ) self.assertTrue(hasattr(A , '''size''' ) ) def __lowerCAmelCase ( self ) -> Optional[Any]: _UpperCAmelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 1_8} ) self.assertEqual(image_processor.crop_size , {'''height''': 1_8, '''width''': 1_8} ) _UpperCAmelCase : str = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2} ) self.assertEqual(image_processor.crop_size , {'''height''': 8_4, '''width''': 8_4} ) def __lowerCAmelCase ( self ) -> Dict: # Initialize image_processing _UpperCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL videos _UpperCAmelCase : Union[str, Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=A ) for video in video_inputs: self.assertIsInstance(A , A ) self.assertIsInstance(video[0] , Image.Image ) # Test not batched input _UpperCAmelCase : List[str] = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _UpperCAmelCase : List[str] = image_processing(A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def __lowerCAmelCase ( self ) -> Optional[Any]: # Initialize image_processing _UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase : Any = prepare_video_inputs(self.image_processor_tester , equal_resolution=A , numpify=A ) for video in video_inputs: self.assertIsInstance(A , A ) self.assertIsInstance(video[0] , np.ndarray ) # Test not batched input _UpperCAmelCase : List[Any] = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _UpperCAmelCase : str = image_processing(A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def __lowerCAmelCase ( self ) -> Union[str, Any]: # Initialize image_processing _UpperCAmelCase : int = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase : Dict = prepare_video_inputs(self.image_processor_tester , equal_resolution=A , torchify=A ) for video in video_inputs: self.assertIsInstance(A , A ) self.assertIsInstance(video[0] , torch.Tensor ) # Test not batched input _UpperCAmelCase : Optional[int] = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _UpperCAmelCase : Any = image_processing(A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
263
"""simple docstring""" from __future__ import annotations import os from collections.abc import Mapping _lowerCAmelCase :Tuple = tuple[int, int] class _UpperCAmelCase : '''simple docstring''' def __init__( self , A , A ) -> None: _UpperCAmelCase : set[int] = vertices _UpperCAmelCase : dict[EdgeT, int] = { (min(A ), max(A )): weight for edge, weight in edges.items() } def __lowerCAmelCase ( self , A , A ) -> None: self.vertices.add(edge[0] ) self.vertices.add(edge[1] ) _UpperCAmelCase : List[Any] = weight def __lowerCAmelCase ( self ) -> Graph: _UpperCAmelCase : Graph = Graph({min(self.vertices )} , {} ) _UpperCAmelCase : EdgeT _UpperCAmelCase : int _UpperCAmelCase : EdgeT _UpperCAmelCase : int while len(subgraph.vertices ) < len(self.vertices ): _UpperCAmelCase : Any = max(self.edges.values() ) + 1 for edge, weight in self.edges.items(): if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): if weight < min_weight: _UpperCAmelCase : Tuple = edge _UpperCAmelCase : Optional[int] = weight subgraph.add_edge(A , A ) return subgraph def lowerCamelCase_ (UpperCamelCase__ : str = "p107_network.txt" ): _UpperCAmelCase : str = os.path.abspath(os.path.dirname(UpperCamelCase__ ) ) _UpperCAmelCase : str = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) _UpperCAmelCase : dict[EdgeT, int] = {} _UpperCAmelCase : list[str] _UpperCAmelCase : int _UpperCAmelCase : int with open(UpperCamelCase__ ) as f: _UpperCAmelCase : str = f.read().strip().split('''\n''' ) _UpperCAmelCase : List[Any] = [line.split(''',''' ) for line in data] for edgea in range(1 , len(UpperCamelCase__ ) ): for edgea in range(UpperCamelCase__ ): if adjaceny_matrix[edgea][edgea] != "-": _UpperCAmelCase : Optional[Any] = int(adjaceny_matrix[edgea][edgea] ) _UpperCAmelCase : Graph = Graph(set(range(len(UpperCamelCase__ ) ) ) , UpperCamelCase__ ) _UpperCAmelCase : Graph = graph.prims_algorithm() _UpperCAmelCase : int = sum(graph.edges.values() ) _UpperCAmelCase : int = sum(subgraph.edges.values() ) return initial_total - optimal_total if __name__ == "__main__": print(f"{solution() = }")
263
1
"""simple docstring""" import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotConfig, is_flax_available from transformers.testing_utils import jax_device, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html lowerCAmelCase__ = '''platform''' import jax import jax.numpy as jnp from transformers import BlenderbotTokenizer from transformers.models.blenderbot.modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, shift_tokens_right, ) def snake_case_ ( A_ : Tuple, A_ : List[Any], A_ : Dict=None, A_ : int=None, A_ : Any=None, A_ : List[Any]=None, A_ : Optional[int]=None, A_ : int=None, ): '''simple docstring''' if attention_mask is None: _lowerCamelCase : Optional[int] = np.where(input_ids != config.pad_token_id, 1, 0 ) if decoder_attention_mask is None: _lowerCamelCase : str = np.where(decoder_input_ids != config.pad_token_id, 1, 0 ) if head_mask is None: _lowerCamelCase : str = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _lowerCamelCase : str = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _lowerCamelCase : List[Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class __snake_case : def __init__( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any]=1_3 , __lowerCAmelCase : Dict=7 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Optional[Any]=9_9 , __lowerCAmelCase : Dict=1_6 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : Tuple=4 , __lowerCAmelCase : List[str]=4 , __lowerCAmelCase : str="gelu" , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : str=3_2 , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Optional[Any]=1 , __lowerCAmelCase : List[str]=0 , __lowerCAmelCase : List[str]=0.02 , ): """simple docstring""" _lowerCamelCase : List[str] = parent _lowerCamelCase : Optional[int] = batch_size _lowerCamelCase : Any = seq_length _lowerCamelCase : List[Any] = is_training _lowerCamelCase : Optional[Any] = use_labels _lowerCamelCase : int = vocab_size _lowerCamelCase : Tuple = hidden_size _lowerCamelCase : List[str] = num_hidden_layers _lowerCamelCase : str = num_attention_heads _lowerCamelCase : List[str] = intermediate_size _lowerCamelCase : Optional[int] = hidden_act _lowerCamelCase : str = hidden_dropout_prob _lowerCamelCase : Tuple = attention_probs_dropout_prob _lowerCamelCase : List[str] = max_position_embeddings _lowerCamelCase : Tuple = eos_token_id _lowerCamelCase : Union[str, Any] = pad_token_id _lowerCamelCase : Union[str, Any] = bos_token_id _lowerCamelCase : Dict = initializer_range def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : Union[str, Any] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) _lowerCamelCase : Tuple = 
np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) _lowerCamelCase : int = shift_tokens_right(__lowerCAmelCase , 1 , 2 ) _lowerCamelCase : Union[str, Any] = BlenderbotConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=__lowerCAmelCase , ) _lowerCamelCase : Any = prepare_blenderbot_inputs_dict(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return config, inputs_dict def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.prepare_config_and_inputs() return config, inputs_dict def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] ): """simple docstring""" _lowerCamelCase : List[str] = 2_0 _lowerCamelCase : List[str] = model_class_name(__lowerCAmelCase ) _lowerCamelCase : List[str] = model.encode(inputs_dict['''input_ids'''] ) _lowerCamelCase , _lowerCamelCase : Optional[Any] = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) _lowerCamelCase : Dict = model.init_cache(decoder_input_ids.shape[0] , __lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : Optional[int] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' ) _lowerCamelCase : int = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _lowerCamelCase : Optional[Any] = model.decode( decoder_input_ids[:, :-1] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , ) _lowerCamelCase : Tuple = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) _lowerCamelCase : Optional[Any] = model.decode( decoder_input_ids[:, -1:] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__lowerCAmelCase , ) _lowerCamelCase : Tuple = model.decode(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' ) def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : str ): """simple docstring""" _lowerCamelCase : Any = 2_0 _lowerCamelCase : Dict = model_class_name(__lowerCAmelCase ) _lowerCamelCase : Tuple = model.encode(inputs_dict['''input_ids'''] ) _lowerCamelCase , _lowerCamelCase : Dict = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) _lowerCamelCase : Dict = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) _lowerCamelCase : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , __lowerCAmelCase , __lowerCAmelCase ) 
_lowerCamelCase : Dict = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _lowerCamelCase : List[str] = model.decode( decoder_input_ids[:, :-1] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , ) _lowerCamelCase : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) _lowerCamelCase : Any = model.decode( decoder_input_ids[:, -1:] , __lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , ) _lowerCamelCase : List[str] = model.decode(__lowerCAmelCase , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase ) _lowerCamelCase : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' ) @require_flax class __snake_case ( unittest.TestCase): snake_case__ : Dict = 9_9 def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : List[Any] = np.array( [ [7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2], [5, 9_7, 1_7, 3_9, 9_4, 4_0, 2], [7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2], [8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2], [5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding [6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2], [5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2], [4_8, 6_1, 9, 2_4, 7_1, 8_2, 2], [2_6, 1, 6_0, 4_8, 2_2, 1_3, 2], [2_1, 5, 6_2, 2_8, 1_4, 7_6, 2], [4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2], [7_0, 7_0, 5_0, 9, 2_8, 0, 2], ] , dtype=np.intaa , ) _lowerCamelCase : Dict = input_ids.shape[0] _lowerCamelCase : str = BlenderbotConfig( vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = self._get_config_and_data() _lowerCamelCase : int = FlaxBlenderbotForConditionalGeneration(__lowerCAmelCase ) _lowerCamelCase : Dict = lm_model(input_ids=__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs['''logits'''].shape , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : Optional[Any] = BlenderbotConfig( vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , ) _lowerCamelCase : Optional[Any] = FlaxBlenderbotForConditionalGeneration(__lowerCAmelCase ) _lowerCamelCase : Dict = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa ) _lowerCamelCase : Optional[int] = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa ) _lowerCamelCase : Dict = lm_model(input_ids=__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = (*summary.shape, config.vocab_size) self.assertEqual(outputs['''logits'''].shape , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : List[str] = np.array([[7_1, 8_2, 1_8, 
3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa ) _lowerCamelCase : List[str] = shift_tokens_right(__lowerCAmelCase , 1 , 2 ) _lowerCamelCase : Dict = np.equal(__lowerCAmelCase , 1 ).astype(np.floataa ).sum() _lowerCamelCase : Union[str, Any] = np.equal(__lowerCAmelCase , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(__lowerCAmelCase , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class __snake_case ( _lowercase , unittest.TestCase , _lowercase): snake_case__ : int = True snake_case__ : List[Any] = ( ( FlaxBlenderbotModel, FlaxBlenderbotForConditionalGeneration, ) if is_flax_available() else () ) snake_case__ : Any = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else () def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : Tuple = FlaxBlenderbotModelTester(self ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _lowerCamelCase : List[Any] = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase ) @jax.jit def encode_jitted(__lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any]=None , **__lowerCAmelCase : Optional[Any] ): return model.encode(input_ids=__lowerCAmelCase , attention_mask=__lowerCAmelCase ) with self.subTest('''JIT Enabled''' ): _lowerCamelCase : Optional[int] = encode_jitted(**__lowerCAmelCase ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): _lowerCamelCase : Any = encode_jitted(**__lowerCAmelCase ).to_tuple() self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) ) for jitted_output, output in zip(__lowerCAmelCase , __lowerCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : str = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] ) _lowerCamelCase : Any = { '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''], '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''], '''encoder_outputs''': encoder_outputs, } @jax.jit def decode_jitted(__lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any ): return model.decode( decoder_input_ids=__lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , encoder_outputs=__lowerCAmelCase , ) with self.subTest('''JIT Enabled''' 
): _lowerCamelCase : Union[str, Any] = decode_jitted(**__lowerCAmelCase ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): _lowerCamelCase : Union[str, Any] = decode_jitted(**__lowerCAmelCase ).to_tuple() self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) ) for jitted_output, output in zip(__lowerCAmelCase , __lowerCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" for model_class_name in self.all_model_classes: _lowerCamelCase : Dict = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids _lowerCamelCase : Tuple = np.ones((1, 1) ) * model.config.eos_token_id _lowerCamelCase : Tuple = model(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' ) @slow def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Optional[Any] = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 1_5, '''max_length''': 2_5} _lowerCamelCase : List[Any] = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True} _lowerCamelCase : Optional[Any] = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=__lowerCAmelCase ) _lowerCamelCase : Optional[int] = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' ) _lowerCamelCase : List[str] = ['''Sam'''] _lowerCamelCase : Union[str, Any] = tokenizer(__lowerCAmelCase , return_tensors='''jax''' ) _lowerCamelCase : List[Any] = model.generate(**__lowerCAmelCase , **__lowerCAmelCase ) _lowerCamelCase : Tuple = '''Sam is a great name. It means "sun" in Gaelic.''' _lowerCamelCase : Dict = tokenizer.batch_decode(__lowerCAmelCase , **__lowerCAmelCase ) assert generated_txt[0].strip() == tgt_text
175
"""simple docstring""" import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class __snake_case ( _lowercase): def __init__( self : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any]=1_3 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : int=False , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Dict=9_9 , __lowerCAmelCase : str=0 , __lowerCAmelCase : Optional[Any]=3_2 , __lowerCAmelCase : Tuple=5 , __lowerCAmelCase : Tuple=4 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Optional[int]=5_1_2 , __lowerCAmelCase : Any=1_2 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : Any=0.02 , __lowerCAmelCase : str=3 , __lowerCAmelCase : Optional[int]=4 , __lowerCAmelCase : Optional[int]="last" , __lowerCAmelCase : str=None , __lowerCAmelCase : int=None , ): """simple docstring""" _lowerCamelCase : Dict = parent _lowerCamelCase : List[str] = batch_size _lowerCamelCase : Dict = seq_length _lowerCamelCase : List[Any] = is_training _lowerCamelCase : Dict = use_input_lengths _lowerCamelCase : Tuple = use_token_type_ids _lowerCamelCase : Any = use_labels _lowerCamelCase : Optional[Any] = gelu_activation _lowerCamelCase : Optional[Any] = sinusoidal_embeddings _lowerCamelCase : Dict = causal _lowerCamelCase : Dict = asm _lowerCamelCase : str = n_langs _lowerCamelCase : str = vocab_size _lowerCamelCase : Optional[int] = n_special _lowerCamelCase : Dict = hidden_size _lowerCamelCase : int = num_hidden_layers _lowerCamelCase : str = num_attention_heads _lowerCamelCase : Dict = hidden_dropout_prob _lowerCamelCase : int = attention_probs_dropout_prob _lowerCamelCase : Any = max_position_embeddings _lowerCamelCase : Any = type_vocab_size _lowerCamelCase : Optional[int] = type_sequence_label_size _lowerCamelCase : List[str] = initializer_range _lowerCamelCase : List[Any] = num_labels _lowerCamelCase : Dict = num_choices _lowerCamelCase : str = summary_type _lowerCamelCase : List[str] = use_proj _lowerCamelCase : int = scope def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowerCamelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) _lowerCamelCase : Optional[int] = None if self.use_input_lengths: _lowerCamelCase : int = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length _lowerCamelCase : Union[str, Any] = None if self.use_token_type_ids: _lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , 
self.n_langs ) _lowerCamelCase : Union[str, Any] = None _lowerCamelCase : List[str] = None _lowerCamelCase : Optional[Any] = None if self.use_labels: _lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowerCamelCase : str = ids_tensor([self.batch_size] , 2 ).float() _lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices ) _lowerCamelCase : Tuple = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , ): """simple docstring""" _lowerCamelCase : Optional[Any] = FlaubertModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase , lengths=__lowerCAmelCase , langs=__lowerCAmelCase ) _lowerCamelCase : str = model(__lowerCAmelCase , langs=__lowerCAmelCase ) _lowerCamelCase : List[str] = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , ): """simple docstring""" _lowerCamelCase : Tuple = FlaubertWithLMHeadModel(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : str = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , ): """simple docstring""" _lowerCamelCase : Union[str, Any] = FlaubertForQuestionAnsweringSimple(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : List[str] = model(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) 
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , ): """simple docstring""" _lowerCamelCase : str = FlaubertForQuestionAnswering(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = model( __lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , cls_index=__lowerCAmelCase , is_impossible=__lowerCAmelCase , p_mask=__lowerCAmelCase , ) _lowerCamelCase : List[str] = model( __lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , cls_index=__lowerCAmelCase , is_impossible=__lowerCAmelCase , ) ((_lowerCamelCase) , ) : str = result_with_labels.to_tuple() _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase ) ((_lowerCamelCase) , ) : Union[str, Any] = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : str , ): """simple docstring""" _lowerCamelCase : Dict = FlaubertForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : str = model(__lowerCAmelCase ) _lowerCamelCase : Tuple = model(__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , ): """simple docstring""" _lowerCamelCase : Any = self.num_labels _lowerCamelCase : List[str] = FlaubertForTokenClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , 
__lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , ): """simple docstring""" _lowerCamelCase : List[str] = self.num_choices _lowerCamelCase : Any = FlaubertForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCamelCase : List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCamelCase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCamelCase : int = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : Any = self.prepare_config_and_inputs() ( ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ) : Optional[Any] = config_and_inputs _lowerCamelCase : int = { '''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class __snake_case ( _lowercase , _lowercase , unittest.TestCase): snake_case__ : List[str] = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) snake_case__ : List[Any] = ( { "feature-extraction": FlaubertModel, "fill-mask": FlaubertWithLMHeadModel, "question-answering": FlaubertForQuestionAnsweringSimple, "text-classification": FlaubertForSequenceClassification, "token-classification": FlaubertForTokenClassification, "zero-shot": FlaubertForSequenceClassification, } if is_torch_available() else {} ) def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ): """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('''Fast''' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int]=False ): """simple docstring""" _lowerCamelCase : Dict = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": _lowerCamelCase : Dict = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase ) _lowerCamelCase : Any = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase ) return inputs_dict def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : int = FlaubertModelTester(self ) _lowerCamelCase : str = ConfigTester(self , config_class=__lowerCAmelCase , emb_dim=3_7 ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*__lowerCAmelCase ) @slow def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Dict = FlaubertModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @slow @require_torch_gpu def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == FlaubertForMultipleChoice: return _lowerCamelCase : Any = True _lowerCamelCase : int = model_class(config=__lowerCAmelCase ) _lowerCamelCase : List[str] = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : int = torch.jit.trace( __lowerCAmelCase , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , '''traced_model.pt''' ) ) _lowerCamelCase : Union[str, Any] = torch.jit.load(os.path.join(__lowerCAmelCase , '''traced_model.pt''' ) , map_location=__lowerCAmelCase ) loaded(inputs_dict['''input_ids'''].to(__lowerCAmelCase ) , inputs_dict['''attention_mask'''].to(__lowerCAmelCase ) ) @require_torch class __snake_case ( unittest.TestCase): @slow def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Union[str, Any] = FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' ) _lowerCamelCase : Any = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) with torch.no_grad(): _lowerCamelCase : Any = model(__lowerCAmelCase )[0] _lowerCamelCase : Optional[Any] = torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , __lowerCAmelCase ) _lowerCamelCase : Optional[int] = torch.tensor( [[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1E-4 ) )
175
1
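The Blenderbot test suite above exercises a `shift_tokens_right` helper. As an illustration of the behavior its assertions pin down (same shape, decoder start token in column 0, one fewer pad token after shifting), here is a minimal NumPy sketch; the real library helper operates on JAX arrays, and the `-100`-to-pad mapping is an assumption carried over from similar seq2seq utilities rather than something the test checks.

import numpy as np


def shift_tokens_right(input_ids: np.ndarray, pad_token_id: int, decoder_start_token_id: int) -> np.ndarray:
    # Shift every row one position to the right, dropping the last column,
    # and put the decoder start token in column 0.
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    # Assumption: positions marked -100 (ignored label positions) map back to the pad id.
    shifted = np.where(shifted == -100, pad_token_id, shifted)
    return shifted


ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int32)
out = shift_tokens_right(ids, pad_token_id=1, decoder_start_token_id=2)
assert out.shape == ids.shape
assert (out[:, 0] == 2).all()  # start token in every first position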
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
51
"""simple docstring""" from __future__ import annotations class snake_case : '''simple docstring''' def __init__( self : int, _lowerCamelCase : List[Any]=None ): '''simple docstring''' __A = data __A = None def __repr__( self : Union[str, Any] ): '''simple docstring''' __A = [] __A = self while temp: string_rep.append(f'{temp.data}' ) __A = temp.next return "->".join(_lowerCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if not elements_list: raise Exception('''The Elements List is empty''' ) __A = __A = Node(elements_list[0] ) for i in range(1 , len(__UpperCamelCase ) ): __A = Node(elements_list[i] ) __A = current.next return head def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if head_node is not None and isinstance(__UpperCamelCase , __UpperCamelCase ): print_reverse(head_node.next ) print(head_node.data ) def lowerCAmelCase ( ): """simple docstring""" from doctest import testmod testmod() __A = make_linked_list([1_4, 5_2, 1_4, 1_2, 4_3] ) print('''Linked List:''' ) print(__UpperCamelCase ) print('''Elements in Reverse:''' ) print_reverse(__UpperCamelCase ) if __name__ == "__main__": main()
266
0
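As a usage note for a patch-based vision config like the one above: the number of patches the model sees follows directly from `image_size` and `patch_size`. A short sketch, assuming the Hugging Face `transformers` package is installed:

from transformers import ViTConfig

# A 224x224 image cut into 16x16 patches gives (224 // 16) ** 2 = 196 patches.
config = ViTConfig(image_size=224, patch_size=16, hidden_size=768)
num_patches = (config.image_size // config.patch_size) ** 2
print(num_patches, config.hidden_size)  # 196 768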
import importlib
import shutil
import threading
import warnings
from typing import List

import fsspec
import fsspec.asyn

from . import compression
from .hffilesystem import HfFileSystem


_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. "s3://") from a dataset path, if any."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """A filesystem is considered remote unless it uses the local "file" protocol."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str) -> None:
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Clear fsspec's event-loop references so forked processes don't hang."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
177
import torch

from transformers import CamembertForMaskedLM, CamembertTokenizer


def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The prediction logits are the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
177
1
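The path helpers above are easy to misread, so here is a self-contained sketch of the URI-stripping logic; the bucket and file names are made up for illustration:

def extract_path_from_uri(dataset_path: str) -> str:
    # Same logic as the helper above: drop the "<protocol>://" prefix if present.
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


assert extract_path_from_uri("s3://my-bucket/train.csv") == "my-bucket/train.csv"
assert extract_path_from_uri("data/train.csv") == "data/train.csv"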
class __lowerCamelCase : """simple docstring""" def __init__( self , UpperCAmelCase = "" , UpperCAmelCase = False ): """simple docstring""" _UpperCAmelCase = {} # A node will be a leaf if the tree contains its word _UpperCAmelCase = is_leaf _UpperCAmelCase = prefix def UpperCamelCase ( self , UpperCAmelCase ): """simple docstring""" _UpperCAmelCase = 0 for q, w in zip(self.prefix , UpperCAmelCase ): if q != w: break x += 1 return self.prefix[:x], self.prefix[x:], word[x:] def UpperCamelCase ( self , UpperCAmelCase ): """simple docstring""" for word in words: self.insert(UpperCAmelCase ) def UpperCamelCase ( self , UpperCAmelCase ): """simple docstring""" if self.prefix == word: _UpperCAmelCase = True # Case 2: The node has no edges that have a prefix to the word # Solution: We create an edge from the current node to a new one # containing the word elif word[0] not in self.nodes: _UpperCAmelCase = RadixNode(prefix=UpperCAmelCase , is_leaf=UpperCAmelCase ) else: _UpperCAmelCase = self.nodes[word[0]] _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = incoming_node.match( UpperCAmelCase ) # Case 3: The node prefix is equal to the matching # Solution: We insert remaining word on the next node if remaining_prefix == "": self.nodes[matching_string[0]].insert(UpperCAmelCase ) # Case 4: The word is greater equal to the matching # Solution: Create a node in between both nodes, change # prefixes and add the new node for the remaining word else: _UpperCAmelCase = remaining_prefix _UpperCAmelCase = self.nodes[matching_string[0]] _UpperCAmelCase = RadixNode(UpperCAmelCase , UpperCAmelCase ) _UpperCAmelCase = aux_node if remaining_word == "": _UpperCAmelCase = True else: self.nodes[matching_string[0]].insert(UpperCAmelCase ) def UpperCamelCase ( self , UpperCAmelCase ): """simple docstring""" _UpperCAmelCase = self.nodes.get(word[0] , UpperCAmelCase ) if not incoming_node: return False else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = incoming_node.match( UpperCAmelCase ) # If there is remaining prefix, the word can't be on the tree if remaining_prefix != "": return False # This applies when the word and the prefix are equal elif remaining_word == "": return incoming_node.is_leaf # We have word remaining so we check the next node else: return incoming_node.find(UpperCAmelCase ) def UpperCamelCase ( self , UpperCAmelCase ): """simple docstring""" _UpperCAmelCase = self.nodes.get(word[0] , UpperCAmelCase ) if not incoming_node: return False else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = incoming_node.match( UpperCAmelCase ) # If there is remaining prefix, the word can't be on the tree if remaining_prefix != "": return False # We have word remaining so we check the next node elif remaining_word != "": return incoming_node.delete(UpperCAmelCase ) else: # If it is not a leaf, we don't have to delete if not incoming_node.is_leaf: return False else: # We delete the nodes if no edges go from it if len(incoming_node.nodes ) == 0: del self.nodes[word[0]] # We merge the current node with its only child if len(self.nodes ) == 1 and not self.is_leaf: _UpperCAmelCase = list(self.nodes.values() )[0] _UpperCAmelCase = merging_node.is_leaf self.prefix += merging_node.prefix _UpperCAmelCase = merging_node.nodes # If there is more than 1 edge, we just mark it as non-leaf elif len(incoming_node.nodes ) > 1: _UpperCAmelCase = False # If there is 1 edge, we merge it with its child else: _UpperCAmelCase = list(incoming_node.nodes.values() )[0] _UpperCAmelCase = merging_node.is_leaf 
incoming_node.prefix += merging_node.prefix _UpperCAmelCase = merging_node.nodes return True def UpperCamelCase ( self , UpperCAmelCase = 0 ): """simple docstring""" if self.prefix != "": print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '' ) for value in self.nodes.values(): value.print_tree(height + 1 ) def __A ( )-> bool: """simple docstring""" _UpperCAmelCase = 'banana bananas bandana band apple all beast'.split() _UpperCAmelCase = RadixNode() root.insert_many(__lowerCAmelCase ) assert all(root.find(__lowerCAmelCase ) for word in words ) assert not root.find('bandanas' ) assert not root.find('apps' ) root.delete('all' ) assert not root.find('all' ) root.delete('banana' ) assert not root.find('banana' ) assert root.find('bananas' ) return True def __A ( )-> None: """simple docstring""" assert test_trie() def __A ( )-> None: """simple docstring""" _UpperCAmelCase = RadixNode() _UpperCAmelCase = 'banana bananas bandanas bandana band apple all beast'.split() root.insert_many(__lowerCAmelCase ) print('Words:' , __lowerCAmelCase ) print('Tree:' ) root.print_tree() if __name__ == "__main__": main()
39
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
4
0
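Every insert and delete case in the radix tree above hinges on its `match` method, which splits an edge label against an incoming word. A standalone sketch of that common-prefix computation (the function name here is hypothetical; the return triple mirrors `match`):

def split_on_common_prefix(prefix: str, word: str) -> tuple[str, str, str]:
    # Mirrors RadixNode.match: count the shared leading characters, then
    # return (matching part, remaining prefix, remaining word).
    x = 0
    for q, w in zip(prefix, word):
        if q != w:
            break
        x += 1
    return prefix[:x], prefix[x:], word[x:]


assert split_on_common_prefix("banana", "bandana") == ("ban", "ana", "dana")
assert split_on_common_prefix("band", "bandana") == ("band", "", "ana")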
import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def lowerCAmelCase_ ( __A, __A, __A, __A, __A=True, __A="pt" ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase__ = {"add_prefix_space": True} if isinstance(__A, __A ) and not line.startswith(" " ) else {} UpperCAmelCase__ = padding_side return tokenizer( [line], max_length=__A, padding="max_length" if pad_to_max_length else None, truncation=__A, return_tensors=__A, add_special_tokens=__A, **__A, ) def lowerCAmelCase_ ( __A, __A, __A=None, ) -> Any: '''simple docstring''' UpperCAmelCase__ = input_ids.ne(__A ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class A ( UpperCAmelCase_ ): def __init__(self : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Any , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any="train" , __UpperCAmelCase : str=None , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : Any=None , __UpperCAmelCase : Any="" , ) -> Optional[Any]: """simple docstring""" super().__init__() UpperCAmelCase__ = Path(__UpperCAmelCase ).joinpath(type_path + ".source" ) UpperCAmelCase__ = Path(__UpperCAmelCase ).joinpath(type_path + ".target" ) UpperCAmelCase__ = self.get_char_lens(self.src_file ) UpperCAmelCase__ = max_source_length UpperCAmelCase__ = max_target_length assert min(self.src_lens ) > 0, f"""found empty line in {self.src_file}""" UpperCAmelCase__ = tokenizer UpperCAmelCase__ = prefix if n_obs is not None: UpperCAmelCase__ = self.src_lens[:n_obs] UpperCAmelCase__ = src_lang UpperCAmelCase__ = tgt_lang def __len__(self : List[str] ) -> Any: """simple docstring""" return len(self.src_lens ) def __getitem__(self : str , __UpperCAmelCase : Union[str, Any] ) -> Dict[str, torch.Tensor]: """simple docstring""" UpperCAmelCase__ = index + 1 # linecache starts at 1 UpperCAmelCase__ = self.prefix + linecache.getline(str(self.src_file ) , __UpperCAmelCase ).rstrip("\n" ) UpperCAmelCase__ = linecache.getline(str(self.tgt_file ) , __UpperCAmelCase ).rstrip("\n" ) assert source_line, f"""empty source line for index {index}""" assert tgt_line, f"""empty tgt line for index {index}""" # Need to add eos token manually for T5 if isinstance(self.tokenizer , __UpperCAmelCase ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right UpperCAmelCase__ = ( self.tokenizer.question_encoder if isinstance(self.tokenizer , __UpperCAmelCase ) else self.tokenizer ) UpperCAmelCase__ = self.tokenizer.generator if isinstance(self.tokenizer , __UpperCAmelCase ) else self.tokenizer UpperCAmelCase__ = encode_line(__UpperCAmelCase , __UpperCAmelCase , self.max_source_length , "right" ) UpperCAmelCase__ = encode_line(__UpperCAmelCase , __UpperCAmelCase , self.max_target_length , "right" ) UpperCAmelCase__ = source_inputs["input_ids"].squeeze() UpperCAmelCase__ = target_inputs["input_ids"].squeeze() UpperCAmelCase__ = source_inputs["attention_mask"].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def lowercase_ (__UpperCAmelCase : str ) -> Tuple: 
"""simple docstring""" return [len(__UpperCAmelCase ) for x in Path(__UpperCAmelCase ).open().readlines()] def lowercase_ (self : Optional[Any] , __UpperCAmelCase : List[str] ) -> Dict[str, torch.Tensor]: """simple docstring""" UpperCAmelCase__ = torch.stack([x["input_ids"] for x in batch] ) UpperCAmelCase__ = torch.stack([x["attention_mask"] for x in batch] ) UpperCAmelCase__ = torch.stack([x["decoder_input_ids"] for x in batch] ) UpperCAmelCase__ = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer , __UpperCAmelCase ) else self.tokenizer.pad_token_id ) UpperCAmelCase__ = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer , __UpperCAmelCase ) else self.tokenizer.pad_token_id ) UpperCAmelCase__ = trim_batch(__UpperCAmelCase , __UpperCAmelCase ) UpperCAmelCase__ , UpperCAmelCase__ = trim_batch(__UpperCAmelCase , __UpperCAmelCase , attention_mask=__UpperCAmelCase ) UpperCAmelCase__ = { "input_ids": source_ids, "attention_mask": source_mask, "decoder_input_ids": y, } return batch UpperCamelCase__ = getLogger(__name__) def lowerCAmelCase_ ( __A ) -> Dict: '''simple docstring''' return list(itertools.chain.from_iterable(__A ) ) def lowerCAmelCase_ ( __A ) -> None: '''simple docstring''' UpperCAmelCase__ = get_git_info() save_json(__A, os.path.join(__A, "git_log.json" ) ) def lowerCAmelCase_ ( __A, __A, __A=4, **__A ) -> List[str]: '''simple docstring''' with open(__A, "w" ) as f: json.dump(__A, __A, indent=__A, **__A ) def lowerCAmelCase_ ( __A ) -> Optional[int]: '''simple docstring''' with open(__A ) as f: return json.load(__A ) def lowerCAmelCase_ ( ) -> List[Any]: '''simple docstring''' UpperCAmelCase__ = git.Repo(search_parent_directories=__A ) UpperCAmelCase__ = { "repo_id": str(__A ), "repo_sha": str(repo.head.object.hexsha ), "repo_branch": str(repo.active_branch ), "hostname": str(socket.gethostname() ), } return repo_infos def lowerCAmelCase_ ( __A, __A ) -> List: '''simple docstring''' return list(map(__A, __A ) ) def lowerCAmelCase_ ( __A, __A ) -> List[Any]: '''simple docstring''' with open(__A, "wb" ) as f: return pickle.dump(__A, __A ) def lowerCAmelCase_ ( __A ) -> Any: '''simple docstring''' def remove_articles(__A ): return re.sub(r"\b(a|an|the)\b", " ", __A ) def white_space_fix(__A ): return " ".join(text.split() ) def remove_punc(__A ): UpperCAmelCase__ = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(__A ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(__A ) ) ) ) def lowerCAmelCase_ ( __A, __A ) -> Any: '''simple docstring''' UpperCAmelCase__ = normalize_answer(__A ).split() UpperCAmelCase__ = normalize_answer(__A ).split() UpperCAmelCase__ = Counter(__A ) & Counter(__A ) UpperCAmelCase__ = sum(common.values() ) if num_same == 0: return 0 UpperCAmelCase__ = 1.0 * num_same / len(__A ) UpperCAmelCase__ = 1.0 * num_same / len(__A ) UpperCAmelCase__ = (2 * precision * recall) / (precision + recall) return fa def lowerCAmelCase_ ( __A, __A ) -> Dict: '''simple docstring''' return normalize_answer(__A ) == normalize_answer(__A ) def lowerCAmelCase_ ( __A, __A ) -> Dict: '''simple docstring''' assert len(__A ) == len(__A ) UpperCAmelCase__ = 0 for hypo, pred in zip(__A, __A ): em += exact_match_score(__A, __A ) if len(__A ) > 0: em /= len(__A ) return {"em": em} def lowerCAmelCase_ ( __A ) -> int: '''simple docstring''' return model_prefix.startswith("rag" ) def lowerCAmelCase_ ( __A, __A, __A ) -> Any: '''simple docstring''' UpperCAmelCase__ = {p: p for p in 
extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead UpperCAmelCase__ = "dropout_rate" for p in extra_params: if getattr(__A, __A, __A ): if not hasattr(__A, __A ) and not hasattr(__A, equivalent_param[p] ): logger.info("config doesn't have a `{}` attribute".format(__A ) ) delattr(__A, __A ) continue UpperCAmelCase__ = p if hasattr(__A, __A ) else equivalent_param[p] setattr(__A, __A, getattr(__A, __A ) ) delattr(__A, __A ) return hparams, config
143
import unittest from transformers import BigBirdTokenizer, BigBirdTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase__ = '▁' UpperCamelCase__ = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class A ( UpperCAmelCase_ , unittest.TestCase ): __UpperCAmelCase : int = BigBirdTokenizer __UpperCAmelCase : Optional[int] = BigBirdTokenizerFast __UpperCAmelCase : Union[str, Any] = True __UpperCAmelCase : List[Any] = True def lowercase_ (self : Dict ) -> List[str]: """simple docstring""" super().setUp() UpperCAmelCase__ = self.tokenizer_class(__UpperCAmelCase , keep_accents=__UpperCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase_ (self : int ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase__ = "<s>" UpperCAmelCase__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase ) def lowercase_ (self : Any ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<unk>" ) self.assertEqual(vocab_keys[1] , "<s>" ) self.assertEqual(vocab_keys[-1] , "[MASK]" ) self.assertEqual(len(__UpperCAmelCase ) , 1_0_0_4 ) def lowercase_ (self : Optional[Any] ) -> Optional[Any]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 ) def lowercase_ (self : Union[str, Any] ) -> Any: """simple docstring""" if not self.test_rust_tokenizer: return UpperCAmelCase__ = self.get_tokenizer() UpperCAmelCase__ = self.get_rust_tokenizer() UpperCAmelCase__ = "I was born in 92000, and this is falsé." UpperCAmelCase__ = tokenizer.tokenize(__UpperCAmelCase ) UpperCAmelCase__ = rust_tokenizer.tokenize(__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) UpperCAmelCase__ = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) UpperCAmelCase__ = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) UpperCAmelCase__ = self.get_rust_tokenizer() UpperCAmelCase__ = tokenizer.encode(__UpperCAmelCase ) UpperCAmelCase__ = rust_tokenizer.encode(__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def lowercase_ (self : str ) -> Tuple: """simple docstring""" UpperCAmelCase__ = BigBirdTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase ) UpperCAmelCase__ = tokenizer.tokenize("This is a test" ) self.assertListEqual(__UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , ) UpperCAmelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( __UpperCAmelCase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) UpperCAmelCase__ = tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) self.assertListEqual( __UpperCAmelCase , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , ) UpperCAmelCase__ = tokenizer.convert_ids_to_tokens(__UpperCAmelCase ) self.assertListEqual( __UpperCAmelCase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) @cached_property def lowercase_ (self : Optional[int] ) -> Union[str, Any]: """simple docstring""" return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" ) @slow def lowercase_ (self : str ) -> Optional[int]: """simple docstring""" UpperCAmelCase__ = "Hello World!" UpperCAmelCase__ = [6_5, 1_8_5_3_6, 2_2_6_0, 1_0_1, 6_6] self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) ) @slow def lowercase_ (self : List[Any] ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase__ = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will" " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth" ) # fmt: off UpperCAmelCase__ = [6_5, 8_7_1, 4_1_9, 3_5_8, 9_4_6, 9_9_1, 2_5_2_1, 4_5_2, 3_5_8, 1_3_5_7, 3_8_7, 7_7_5_1, 3_5_3_6, 1_1_2, 9_8_5, 4_5_6, 1_2_6, 8_6_5, 9_3_8, 5_4_0_0, 5_7_3_4, 4_5_8, 1_3_6_8, 4_6_7, 7_8_6, 2_4_6_2, 5_2_4_6, 1_1_5_9, 6_3_3, 8_6_5, 4_5_1_9, 4_5_7, 5_8_2, 8_5_2, 2_5_5_7, 4_2_7, 9_1_6, 5_0_8, 4_0_5, 3_4_3_2_4, 4_9_7, 3_9_1, 4_0_8, 1_1_3_4_2, 1_2_4_4, 3_8_5, 1_0_0, 9_3_8, 9_8_5, 4_5_6, 5_7_4, 3_6_2, 1_2_5_9_7, 3_2_0_0, 3_1_2_9, 1_1_7_2, 6_6] # noqa: E231 # fmt: on self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) ) @require_torch @slow def lowercase_ (self : List[str] ) -> int: """simple docstring""" import torch from transformers import BigBirdConfig, BigBirdModel # Build sequence UpperCAmelCase__ = list(self.big_tokenizer.get_vocab().keys() )[:1_0] UpperCAmelCase__ = " ".join(__UpperCAmelCase ) UpperCAmelCase__ = self.big_tokenizer.encode_plus(__UpperCAmelCase , return_tensors="pt" , return_token_type_ids=__UpperCAmelCase ) UpperCAmelCase__ = self.big_tokenizer.batch_encode_plus( [sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=__UpperCAmelCase ) UpperCAmelCase__ = BigBirdConfig(attention_type="original_full" ) UpperCAmelCase__ = BigBirdModel(__UpperCAmelCase ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**__UpperCAmelCase ) model(**__UpperCAmelCase ) @slow def lowercase_ (self : str ) -> Optional[int]: """simple docstring""" UpperCAmelCase__ = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" ) UpperCAmelCase__ = tokenizer.decode(tokenizer("Paris is the [MASK]." 
).input_ids ) self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]" ) @slow def lowercase_ (self : Optional[Any] ) -> Optional[int]: """simple docstring""" UpperCAmelCase__ = {"input_ids": [[6_5, 3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4, 6_6], [6_5, 4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [6_5, 4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCAmelCase , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
143
1
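The evaluation helpers in the row above score predictions with a token-overlap F1. A self-contained miniature of that computation (without the article and punctuation normalization step) makes the formula concrete:

from collections import Counter


def token_f1(pred: str, gold: str) -> float:
    # Same overlap F1 as f1_score above, applied to whitespace tokens.
    pred_toks, gold_toks = pred.split(), gold.split()
    common = Counter(pred_toks) & Counter(gold_toks)
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_toks)
    recall = num_same / len(gold_toks)
    return 2 * precision * recall / (precision + recall)


# "cat" and "sat" overlap: precision 2/3, recall 2/4 -> F1 = 4/7
assert round(token_f1("the cat sat", "a cat sat down"), 4) == 0.5714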
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
42
import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import evaluate import numpy as np from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") a : int = logging.getLogger(__name__) @dataclass class a : """simple docstring""" a : Optional[int] = field( default=128 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) a : bool = field( default=lowercase__ , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} ) a : bool = field( default=lowercase__ , metadata={ 'help': ( 'Whether to pad all samples to `max_seq_length`. ' 'If False, will pad the samples dynamically when batching to the maximum length in the batch.' ) } , ) a : Optional[int] = field( default=lowercase__ , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' ) } , ) a : Optional[int] = field( default=lowercase__ , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' ) } , ) a : Optional[int] = field( default=lowercase__ , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of prediction examples to this ' 'value if set.' ) } , ) @dataclass class a : """simple docstring""" a : str = field( default=lowercase__ , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) a : str = field( default=lowercase__ , metadata={'help': 'Evaluation language. 
Also train language if `train_language` is set to None.'} ) a : Optional[str] = field( default=lowercase__ , metadata={'help': 'Train language if it is different from the evaluation language.'} ) a : Optional[str] = field( default=lowercase__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) a : Optional[str] = field( default=lowercase__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) a : Optional[str] = field( default=lowercase__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) a : Optional[bool] = field( default=lowercase__ , metadata={'help': 'arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'} , ) a : bool = field( default=lowercase__ , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , ) a : str = field( default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , ) a : bool = field( default=lowercase__ , metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' ) } , ) a : bool = field( default=lowercase__ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , ) def lowerCamelCase__ ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __UpperCAmelCase : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_xnli""" , __lowerCamelCase ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() __UpperCAmelCase : List[Any] = training_args.get_process_log_level() logger.setLevel(__lowerCamelCase ) datasets.utils.logging.set_verbosity(__lowerCamelCase ) transformers.utils.logging.set_verbosity(__lowerCamelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(f"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. __UpperCAmelCase : Dict = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __UpperCAmelCase : Any = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. 
""" """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Set seed before initializing model. set_seed(training_args.seed ) # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. # Downloading and loading xnli dataset from the hub. if training_args.do_train: if model_args.train_language is None: __UpperCAmelCase : Tuple = load_dataset( """xnli""" , model_args.language , split="""train""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: __UpperCAmelCase : List[Any] = load_dataset( """xnli""" , model_args.train_language , split="""train""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) __UpperCAmelCase : str = train_dataset.features["""label"""].names if training_args.do_eval: __UpperCAmelCase : Any = load_dataset( """xnli""" , model_args.language , split="""validation""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) __UpperCAmelCase : str = eval_dataset.features["""label"""].names if training_args.do_predict: __UpperCAmelCase : Optional[Any] = load_dataset( """xnli""" , model_args.language , split="""test""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) __UpperCAmelCase : List[str] = predict_dataset.features["""label"""].names # Labels __UpperCAmelCase : Tuple = len(__lowerCamelCase ) # Load pretrained model and tokenizer # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__UpperCAmelCase : List[Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__lowerCamelCase , idalabel={str(__lowerCamelCase ): label for i, label in enumerate(__lowerCamelCase )} , labelaid={label: i for i, label in enumerate(__lowerCamelCase )} , finetuning_task="""xnli""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) __UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) __UpperCAmelCase : Optional[Any] = AutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # Preprocessing the datasets # Padding strategy if data_args.pad_to_max_length: __UpperCAmelCase : List[Any] = """max_length""" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch __UpperCAmelCase : List[Any] = False def preprocess_function(__lowerCamelCase : int ): # Tokenize the texts return tokenizer( examples["""premise"""] , examples["""hypothesis"""] , padding=__lowerCamelCase , max_length=data_args.max_seq_length , truncation=__lowerCamelCase , ) if training_args.do_train: if data_args.max_train_samples is not None: __UpperCAmelCase : int = min(len(__lowerCamelCase ) , data_args.max_train_samples ) __UpperCAmelCase : Dict = train_dataset.select(range(__lowerCamelCase ) ) with training_args.main_process_first(desc="""train dataset map pre-processing""" ): __UpperCAmelCase : Union[str, Any] = train_dataset.map( __lowerCamelCase , batched=__lowerCamelCase , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on train dataset""" , ) # Log a few random samples from the training set: for index in random.sample(range(len(__lowerCamelCase ) ) , 3 ): logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" ) if training_args.do_eval: if data_args.max_eval_samples is not None: __UpperCAmelCase : Tuple = min(len(__lowerCamelCase ) , data_args.max_eval_samples ) __UpperCAmelCase : List[str] = eval_dataset.select(range(__lowerCamelCase ) ) with training_args.main_process_first(desc="""validation dataset map pre-processing""" ): __UpperCAmelCase : Dict = eval_dataset.map( __lowerCamelCase , batched=__lowerCamelCase , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on validation dataset""" , ) if training_args.do_predict: if data_args.max_predict_samples is not None: __UpperCAmelCase : Dict = min(len(__lowerCamelCase ) , data_args.max_predict_samples ) __UpperCAmelCase : Tuple = predict_dataset.select(range(__lowerCamelCase ) ) with training_args.main_process_first(desc="""prediction dataset map pre-processing""" ): __UpperCAmelCase : Any = predict_dataset.map( __lowerCamelCase , batched=__lowerCamelCase , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on prediction dataset""" , ) # Get the metric function 
__UpperCAmelCase : Tuple = evaluate.load("""xnli""" ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(__lowerCamelCase : EvalPrediction ): __UpperCAmelCase : Optional[Any] = p.predictions[0] if isinstance(p.predictions , __lowerCamelCase ) else p.predictions __UpperCAmelCase : str = np.argmax(__lowerCamelCase , axis=1 ) return metric.compute(predictions=__lowerCamelCase , references=p.label_ids ) # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: __UpperCAmelCase : Any = default_data_collator elif training_args.fpaa: __UpperCAmelCase : Tuple = DataCollatorWithPadding(__lowerCamelCase , pad_to_multiple_of=8 ) else: __UpperCAmelCase : int = None # Initialize our Trainer __UpperCAmelCase : Union[str, Any] = Trainer( model=__lowerCamelCase , args=__lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__lowerCamelCase , tokenizer=__lowerCamelCase , data_collator=__lowerCamelCase , ) # Training if training_args.do_train: __UpperCAmelCase : List[str] = None if training_args.resume_from_checkpoint is not None: __UpperCAmelCase : Optional[int] = training_args.resume_from_checkpoint elif last_checkpoint is not None: __UpperCAmelCase : Union[str, Any] = last_checkpoint __UpperCAmelCase : Any = trainer.train(resume_from_checkpoint=__lowerCamelCase ) __UpperCAmelCase : Dict = train_result.metrics __UpperCAmelCase : Optional[Any] = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(__lowerCamelCase ) ) __UpperCAmelCase : Dict = min(__lowerCamelCase , len(__lowerCamelCase ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("""train""" , __lowerCamelCase ) trainer.save_metrics("""train""" , __lowerCamelCase ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) __UpperCAmelCase : Dict = trainer.evaluate(eval_dataset=__lowerCamelCase ) __UpperCAmelCase : List[str] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__lowerCamelCase ) __UpperCAmelCase : Tuple = min(__lowerCamelCase , len(__lowerCamelCase ) ) trainer.log_metrics("""eval""" , __lowerCamelCase ) trainer.save_metrics("""eval""" , __lowerCamelCase ) # Prediction if training_args.do_predict: logger.info("""*** Predict ***""" ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = trainer.predict(__lowerCamelCase , metric_key_prefix="""predict""" ) __UpperCAmelCase : int = ( data_args.max_predict_samples if data_args.max_predict_samples is not None else len(__lowerCamelCase ) ) __UpperCAmelCase : Optional[int] = min(__lowerCamelCase , len(__lowerCamelCase ) ) trainer.log_metrics("""predict""" , __lowerCamelCase ) trainer.save_metrics("""predict""" , __lowerCamelCase ) __UpperCAmelCase : Optional[int] = np.argmax(__lowerCamelCase , axis=1 ) __UpperCAmelCase : Tuple = os.path.join(training_args.output_dir , """predictions.txt""" ) if trainer.is_world_process_zero(): with open(__lowerCamelCase , """w""" ) as writer: writer.write("""index\tprediction\n""" ) for index, item in enumerate(__lowerCamelCase ): __UpperCAmelCase : Tuple = label_list[item] writer.write(f"""{index}\t{item}\n""" ) if __name__ == "__main__": main()
114
0
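The XNLI script above drives all of its configuration through `HfArgumentParser` over three dataclasses. A minimal sketch of that parsing pattern, assuming transformers is installed; the dataclass fields here are illustrative, not the script's full argument set:

# Minimal sketch of the argument-parsing pattern used by the XNLI script above.
from dataclasses import dataclass, field
from typing import Optional

from transformers import HfArgumentParser, TrainingArguments


@dataclass
class ModelArguments:
    model_name_or_path: str = field(metadata={"help": "Model checkpoint to fine-tune."})
    language: Optional[str] = field(default=None, metadata={"help": "Evaluation language."})


parser = HfArgumentParser((ModelArguments, TrainingArguments))
model_args, training_args = parser.parse_args_into_dataclasses()
print(model_args.model_name_or_path, training_args.output_dir)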
'''simple docstring''' import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCamelCase_ : def __init__( self : List[str] , _A : str , _A : int=13 , _A : Tuple=7 , _A : Optional[Any]=True , _A : Optional[int]=True , _A : Any=True , _A : int=True , _A : Optional[Any]=99 , _A : List[Any]=16 , _A : Any=36 , _A : Any=6 , _A : Optional[int]=6 , _A : Union[str, Any]=6 , _A : Dict=37 , _A : List[Any]="gelu" , _A : List[str]=0.1 , _A : str=0.1 , _A : str=512 , _A : Dict=16 , _A : str=2 , _A : str=0.0_2 , _A : int=3 , _A : int=4 , _A : Dict=None , ): '''simple docstring''' UpperCAmelCase__ : str = parent UpperCAmelCase__ : int = batch_size UpperCAmelCase__ : List[Any] = seq_length UpperCAmelCase__ : Dict = is_training UpperCAmelCase__ : Any = use_input_mask UpperCAmelCase__ : Union[str, Any] = use_token_type_ids UpperCAmelCase__ : Optional[int] = use_labels UpperCAmelCase__ : str = vocab_size UpperCAmelCase__ : Tuple = embedding_size UpperCAmelCase__ : Any = hidden_size UpperCAmelCase__ : Union[str, Any] = num_hidden_layers UpperCAmelCase__ : int = num_hidden_groups UpperCAmelCase__ : str = num_attention_heads UpperCAmelCase__ : Tuple = intermediate_size UpperCAmelCase__ : List[str] = hidden_act UpperCAmelCase__ : Optional[Any] = hidden_dropout_prob UpperCAmelCase__ : Tuple = attention_probs_dropout_prob UpperCAmelCase__ : Tuple = max_position_embeddings UpperCAmelCase__ : List[str] = type_vocab_size UpperCAmelCase__ : List[str] = type_sequence_label_size UpperCAmelCase__ : Any = initializer_range UpperCAmelCase__ : Union[str, Any] = num_labels UpperCAmelCase__ : List[str] = num_choices UpperCAmelCase__ : str = scope def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase__ : int = None if self.use_input_mask: UpperCAmelCase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase__ : Dict = None if self.use_token_type_ids: UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase__ : str = None UpperCAmelCase__ : str = None UpperCAmelCase__ : int = None if self.use_labels: UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase__ : List[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowercase_ ( self : List[Any] ): '''simple docstring''' return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def lowercase_ ( self : List[Any] , _A : Dict , _A : Optional[Any] , _A : Optional[int] , _A : List[str] , _A : Optional[int] , _A : Optional[int] , _A : int ): '''simple docstring''' UpperCAmelCase__ : List[Any] = AlbertModel(config=_A ) model.to(_A ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(_A , attention_mask=_A , token_type_ids=_A ) UpperCAmelCase__ : int = model(_A , token_type_ids=_A ) UpperCAmelCase__ : Union[str, Any] = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowercase_ ( self : Any , _A : Tuple , _A : Optional[Any] , _A : Tuple , _A : str , _A : List[str] , _A : Any , _A : List[str] ): '''simple docstring''' UpperCAmelCase__ : Dict = AlbertForPreTraining(config=_A ) model.to(_A ) model.eval() UpperCAmelCase__ : int = model( _A , attention_mask=_A , token_type_ids=_A , labels=_A , sentence_order_label=_A , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def lowercase_ ( self : Optional[Any] , _A : List[str] , _A : int , _A : Any , _A : List[Any] , _A : Tuple , _A : Tuple , _A : Any ): '''simple docstring''' UpperCAmelCase__ : List[Any] = AlbertForMaskedLM(config=_A ) model.to(_A ) model.eval() UpperCAmelCase__ : Optional[Any] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase_ ( self : Optional[int] , _A : str , _A : Dict , _A : Optional[Any] , _A : Optional[int] , _A : Union[str, Any] , _A : List[Any] , _A : Any ): '''simple docstring''' UpperCAmelCase__ : Any = AlbertForQuestionAnswering(config=_A ) model.to(_A ) model.eval() UpperCAmelCase__ : List[str] = model( _A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase_ ( self : Optional[int] , _A : List[Any] , _A : Union[str, Any] , _A : str , _A : Tuple , _A : Optional[Any] , _A : int , _A : Any ): '''simple docstring''' UpperCAmelCase__ : Any = self.num_labels UpperCAmelCase__ : Optional[int] = AlbertForSequenceClassification(_A ) model.to(_A ) model.eval() UpperCAmelCase__ : int = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase_ ( self : Union[str, Any] , _A : str , _A : Dict , _A : Any , _A : str , _A : int , _A : Tuple , _A : int ): '''simple docstring''' UpperCAmelCase__ : Any = self.num_labels UpperCAmelCase__ : List[str] = AlbertForTokenClassification(config=_A ) model.to(_A ) model.eval() UpperCAmelCase__ : str = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , 
(self.batch_size, self.seq_length, self.num_labels) ) def lowercase_ ( self : Tuple , _A : int , _A : Tuple , _A : List[str] , _A : Any , _A : str , _A : Optional[Any] , _A : str ): '''simple docstring''' UpperCAmelCase__ : int = self.num_choices UpperCAmelCase__ : Optional[int] = AlbertForMultipleChoice(config=_A ) model.to(_A ) model.eval() UpperCAmelCase__ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ : Optional[Any] = model( _A , attention_mask=_A , token_type_ids=_A , labels=_A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.prepare_config_and_inputs() ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : Any = config_and_inputs UpperCAmelCase__ : List[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class lowerCamelCase_ ( __a , __a , unittest.TestCase ): lowerCAmelCase__ = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) lowerCAmelCase__ = ( { 'feature-extraction': AlbertModel, 'fill-mask': AlbertForMaskedLM, 'question-answering': AlbertForQuestionAnswering, 'text-classification': AlbertForSequenceClassification, 'token-classification': AlbertForTokenClassification, 'zero-shot': AlbertForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase__ = True def lowercase_ ( self : Optional[int] , _A : int , _A : int , _A : List[str]=False ): '''simple docstring''' UpperCAmelCase__ : str = super()._prepare_for_class(_A , _A , return_labels=_A ) if return_labels: if model_class in get_values(_A ): UpperCAmelCase__ : List[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_A ) UpperCAmelCase__ : int = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_A ) return inputs_dict def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = AlbertModelTester(self ) UpperCAmelCase__ : Optional[Any] = ConfigTester(self , config_class=_A , hidden_size=37 ) def lowercase_ ( self : Any ): '''simple docstring''' self.config_tester.run_common_tests() def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_A ) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_A ) def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_A ) def lowercase_ ( self : Tuple 
): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_A ) def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase__ : int = type self.model_tester.create_and_check_model(*_A ) @slow def lowercase_ ( self : Optional[int] ): '''simple docstring''' for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : Dict = AlbertModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @require_torch class lowerCamelCase_ ( unittest.TestCase ): @slow def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = AlbertModel.from_pretrained('''albert-base-v2''' ) UpperCAmelCase__ : Any = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] ) UpperCAmelCase__ : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): UpperCAmelCase__ : Union[str, Any] = model(_A , attention_mask=_A )[0] UpperCAmelCase__ : List[str] = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , _A ) UpperCAmelCase__ : Optional[int] = torch.tensor( [[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1e-4 ) )
299
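The slow test above runs `albert-base-v2` and checks a hard-coded slice of its hidden states. A sketch of the same forward pass, assuming network access and sentencepiece installed; the input sentence is arbitrary:

# Sketch of the albert-base-v2 forward pass the integration test above verifies.
import torch
from transformers import AlbertModel, AlbertTokenizer

tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
model = AlbertModel.from_pretrained("albert-base-v2")
model.eval()

inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# The test above asserts this shape is (1, seq_len, 768) for its fixed input.
print(outputs.last_hidden_state.shape)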
'''simple docstring''' import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class lowerCamelCase_ ( unittest.TestCase ): def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Any = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split() UpperCAmelCase__ : Optional[Any] = dict(zip(_A , range(len(_A ) ) ) ) UpperCAmelCase__ : Tuple = { '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>''', } UpperCAmelCase__ : Optional[int] = { '''feature_size''': 1, '''padding_value''': 0.0, '''sampling_rate''': 16_000, '''return_attention_mask''': False, '''do_normalize''': True, } UpperCAmelCase__ : Union[str, Any] = tempfile.mkdtemp() UpperCAmelCase__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase__ : int = os.path.join(self.tmpdirname , _A ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_A ) + '''\n''' ) with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_A ) + '''\n''' ) # load decoder from hub UpperCAmelCase__ : Any = '''hf-internal-testing/ngram-beam-search-decoder''' def lowercase_ ( self : int , **_A : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Dict = self.add_kwargs_tokens_map.copy() kwargs.update(_A ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_A ) def lowercase_ ( self : str , **_A : Any ): '''simple docstring''' return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_A ) def lowercase_ ( self : str , **_A : Any ): '''simple docstring''' return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_A ) def lowercase_ ( self : Any ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer() UpperCAmelCase__ : Any = self.get_feature_extractor() UpperCAmelCase__ : Tuple = self.get_decoder() UpperCAmelCase__ : Tuple = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , _A ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) 
self.assertIsInstance(processor.feature_extractor , _A ) # decoder self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , ) self.assertIsInstance(processor.decoder , _A ) def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha , 5.0 ) self.assertEqual(processor.language_model.beta , 3.0 ) self.assertEqual(processor.language_model.score_boundary , -7.0 ) self.assertEqual(processor.language_model.unk_score_offset , 3 ) def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(['''xx'''] ) with self.assertRaisesRegex(_A , '''include''' ): WavaVecaProcessorWithLM( tokenizer=_A , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Any = self.get_feature_extractor() UpperCAmelCase__ : Optional[int] = self.get_tokenizer() UpperCAmelCase__ : Any = self.get_decoder() UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A ) UpperCAmelCase__ : List[Any] = floats_list((3, 1_000) ) UpperCAmelCase__ : Dict = feature_extractor(_A , return_tensors='''np''' ) UpperCAmelCase__ : str = processor(_A , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.get_feature_extractor() UpperCAmelCase__ : str = self.get_tokenizer() UpperCAmelCase__ : str = self.get_decoder() UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A ) UpperCAmelCase__ : Union[str, Any] = '''This is a test string''' UpperCAmelCase__ : Optional[int] = processor(text=_A ) UpperCAmelCase__ : List[str] = tokenizer(_A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowercase_ ( self : Dict , _A : Optional[int]=(2, 10, 16) , _A : List[str]=77 ): '''simple docstring''' np.random.seed(_A ) return np.random.rand(*_A ) def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.get_feature_extractor() UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer() UpperCAmelCase__ : Optional[Any] = self.get_decoder() UpperCAmelCase__ : Tuple = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A ) UpperCAmelCase__ : int = self._get_dummy_logits(shape=(10, 16) , seed=13 ) UpperCAmelCase__ : List[Any] = processor.decode(_A ) UpperCAmelCase__ : List[Any] = decoder.decode_beams(_A )[0] self.assertEqual(decoded_decoder[0] , decoded_processor.text ) self.assertEqual('''</s> <s> </s>''' , decoded_processor.text ) self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score 
) self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score ) @parameterized.expand([[None], ['''fork'''], ['''spawn''']] ) def lowercase_ ( self : Any , _A : str ): '''simple docstring''' UpperCAmelCase__ : Any = self.get_feature_extractor() UpperCAmelCase__ : Tuple = self.get_tokenizer() UpperCAmelCase__ : Tuple = self.get_decoder() UpperCAmelCase__ : Any = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A ) UpperCAmelCase__ : Optional[Any] = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) if pool_context is None: UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(_A ) else: with get_context(_A ).Pool() as pool: UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(_A , _A ) UpperCAmelCase__ : str = list(_A ) with get_context('''fork''' ).Pool() as p: UpperCAmelCase__ : Dict = decoder.decode_beams_batch(_A , _A ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(_A , decoded_processor.text ) self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text ) self.assertListEqual(_A , decoded_processor.logit_score ) self.assertListEqual(_A , decoded_processor.lm_score ) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : str = self.get_feature_extractor() UpperCAmelCase__ : List[Any] = self.get_tokenizer() UpperCAmelCase__ : int = self.get_decoder() UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A ) UpperCAmelCase__ : str = self._get_dummy_logits() UpperCAmelCase__ : Optional[int] = 15 UpperCAmelCase__ : Dict = -2_0.0 UpperCAmelCase__ : Optional[Any] = -4.0 UpperCAmelCase__ : Union[str, Any] = processor.batch_decode( _A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , ) UpperCAmelCase__ : List[Any] = decoded_processor_out.text UpperCAmelCase__ : List[str] = list(_A ) with get_context('''fork''' ).Pool() as pool: UpperCAmelCase__ : Tuple = decoder.decode_beams_batch( _A , _A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , ) UpperCAmelCase__ : Optional[int] = [d[0][0] for d in decoded_decoder_out] UpperCAmelCase__ : Optional[Any] = [d[0][2] for d in decoded_decoder_out] UpperCAmelCase__ : Optional[int] = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(_A , _A ) self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , _A ) self.assertTrue(np.array_equal(_A , decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , _A , atol=1e-3 ) ) self.assertTrue(np.array_equal(_A , decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , _A , atol=1e-3 ) ) def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.get_feature_extractor() UpperCAmelCase__ : Optional[Any] = self.get_tokenizer() UpperCAmelCase__ : Dict = self.get_decoder() UpperCAmelCase__ : int = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A ) UpperCAmelCase__ : Optional[int] = self._get_dummy_logits() UpperCAmelCase__ : List[str] = 2.0 UpperCAmelCase__ : Union[str, Any] = 5.0 UpperCAmelCase__ : str = -2_0.0 UpperCAmelCase__ : 
Optional[int] = True UpperCAmelCase__ : Union[str, Any] = processor.batch_decode( _A , alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , ) UpperCAmelCase__ : Union[str, Any] = decoded_processor_out.text UpperCAmelCase__ : Tuple = list(_A ) decoder.reset_params( alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , ) with get_context('''fork''' ).Pool() as pool: UpperCAmelCase__ : Optional[Any] = decoder.decode_beams_batch( _A , _A , ) UpperCAmelCase__ : str = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(_A , _A ) self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , _A ) UpperCAmelCase__ : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha , 2.0 ) self.assertEqual(lm_model.beta , 5.0 ) self.assertEqual(lm_model.unk_score_offset , -2_0.0 ) self.assertEqual(lm_model.score_boundary , _A ) def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase__ : Dict = processor.decoder.model_container[processor.decoder._model_key] UpperCAmelCase__ : Optional[int] = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() UpperCAmelCase__ : Dict = os.listdir(_A ) UpperCAmelCase__ : Optional[Any] = ['''alphabet.json''', '''language_model'''] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) self.assertListEqual(_A , _A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : str = snapshot_download('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase__ : Any = WavaVecaProcessorWithLM.from_pretrained(_A ) UpperCAmelCase__ : Optional[int] = processor.decoder.model_container[processor.decoder._model_key] UpperCAmelCase__ : str = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() UpperCAmelCase__ : List[str] = os.listdir(_A ) UpperCAmelCase__ : Any = os.listdir(_A ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(_A , _A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Dict = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase__ : Dict = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase__ : Tuple = floats_list((3, 1_000) ) UpperCAmelCase__ : int = processor_wavaveca(_A , return_tensors='''np''' ) UpperCAmelCase__ : List[str] = processor_auto(_A , return_tensors='''np''' ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 ) UpperCAmelCase__ : Tuple = self._get_dummy_logits() UpperCAmelCase__ : List[str] = processor_wavaveca.batch_decode(_A ) UpperCAmelCase__ : int = processor_auto.batch_decode(_A ) self.assertListEqual(decoded_wavaveca.text , decoded_auto.text ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : int = self.get_feature_extractor() UpperCAmelCase__ : int = self.get_tokenizer() UpperCAmelCase__ : Optional[Any] = self.get_decoder() UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , 
decoder=_A ) self.assertListEqual( processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , ) @staticmethod def lowercase_ ( _A : Dict , _A : str ): '''simple docstring''' UpperCAmelCase__ : int = [d[key] for d in offsets] return retrieved_list def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase__ : str = self._get_dummy_logits()[0] UpperCAmelCase__ : List[str] = processor.decode(_A , output_word_offsets=_A ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(_A , _A ) ) self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Any = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase__ : Dict = self._get_dummy_logits() UpperCAmelCase__ : Dict = processor.batch_decode(_A , output_word_offsets=_A ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(_A , _A ) ) self.assertListEqual( [''' '''.join(self.get_from_offsets(_A , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] ) @slow @require_torch @require_torchaudio def lowercase_ ( self : Optional[Any] ): '''simple docstring''' import torch UpperCAmelCase__ : Any = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=_A ) UpperCAmelCase__ : Dict = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=16_000 ) ) UpperCAmelCase__ : List[Any] = iter(_A ) UpperCAmelCase__ : Optional[Any] = next(_A ) UpperCAmelCase__ : Any = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) UpperCAmelCase__ : int = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train UpperCAmelCase__ : int = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values with torch.no_grad(): UpperCAmelCase__ : Dict = model(_A ).logits.cpu().numpy() UpperCAmelCase__ : int = processor.decode(logits[0] , output_word_offsets=_A ) UpperCAmelCase__ : Any = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate UpperCAmelCase__ : Any = [ { '''start_time''': d['''start_offset'''] * time_offset, '''end_time''': 
d['''end_offset'''] * time_offset, '''word''': d['''word'''], } for d in output['''word_offsets'''] ] UpperCAmelCase__ : int = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL''' # output words self.assertEqual(''' '''.join(self.get_from_offsets(_A , '''word''' ) ) , _A ) self.assertEqual(''' '''.join(self.get_from_offsets(_A , '''word''' ) ) , output.text ) # output times UpperCAmelCase__ : List[Any] = torch.tensor(self.get_from_offsets(_A , '''start_time''' ) ) UpperCAmelCase__ : List[str] = torch.tensor(self.get_from_offsets(_A , '''end_time''' ) ) # fmt: off UpperCAmelCase__ : int = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] ) UpperCAmelCase__ : List[str] = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] ) # fmt: on self.assertTrue(torch.allclose(_A , _A , atol=0.0_1 ) ) self.assertTrue(torch.allclose(_A , _A , atol=0.0_1 ) )
299
1
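The processor tests above exercise `Wav2Vec2ProcessorWithLM` (obfuscated here as `WavaVecaProcessorWithLM`), which routes CTC logits through pyctcdecode's beam search with an n-gram language model. A sketch of that decode path, assuming pyctcdecode is installed and the checkpoint named in the test downloads; the silent audio is a placeholder:

# CTC logits -> beam search with an n-gram LM, as tested above.
import numpy as np
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2ProcessorWithLM

processor = Wav2Vec2ProcessorWithLM.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

speech = np.zeros(16_000)  # 1 second of 16 kHz silence as stand-in audio
inputs = processor(speech, sampling_rate=16_000, return_tensors="pt")

with torch.no_grad():
    logits = model(inputs.input_values).logits

# batch_decode runs pyctcdecode's beam search over the language model.
transcription = processor.batch_decode(logits.numpy()).text
print(transcription)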
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
313
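The config module above is a renamed copy of transformers' `Data2VecTextConfig`. A short sketch of constructing such a `PretrainedConfig` subclass and round-tripping it through its dict/JSON serialization:

# PretrainedConfig subclasses serialize losslessly through dict/JSON.
from transformers import Data2VecTextConfig

config = Data2VecTextConfig(hidden_size=768, num_hidden_layers=12, num_attention_heads=12)

restored = Data2VecTextConfig.from_dict(config.to_dict())
assert restored.hidden_size == config.hidden_size

print(config.to_json_string()[:80])  # first part of the JSON blob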
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
313
1
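The lazy `__init__.py` above exports the WavLM classes. A sketch of running the exported model on placeholder audio, assuming torch is installed and the `microsoft/wavlm-base` checkpoint downloads:

# Feature-extract dummy audio and run it through WavLM.
import numpy as np
import torch
from transformers import AutoFeatureExtractor, WavLMModel

feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/wavlm-base")
model = WavLMModel.from_pretrained("microsoft/wavlm-base")

speech = np.zeros(16_000)  # 1 second of 16 kHz silence as placeholder input
inputs = feature_extractor(speech, sampling_rate=16_000, return_tensors="pt")

with torch.no_grad():
    hidden_states = model(**inputs).last_hidden_state
print(hidden_states.shape)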
"""simple docstring""" import json import logging import os import sys from time import time from unittest.mock import patch from transformers.testing_utils import TestCasePlus, require_torch_tpu logging.basicConfig(level=logging.DEBUG) _lowerCAmelCase : Optional[Any] = logging.getLogger() def lowerCamelCase_( _lowerCamelCase ) -> List[Any]: '''simple docstring''' _lowerCamelCase : str = {} _lowerCamelCase : Tuple = os.path.join(_lowerCamelCase , "all_results.json" ) if os.path.exists(_lowerCamelCase ): with open(_lowerCamelCase , "r" ) as f: _lowerCamelCase : Any = json.load(_lowerCamelCase ) else: raise ValueError(F"""can't find {path}""" ) return results _lowerCAmelCase : Optional[Any] = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) @require_torch_tpu class A_ ( _a ): def _lowercase ( self: Optional[int] ): '''simple docstring''' import xla_spawn _lowerCamelCase : Optional[int] = self.get_auto_remove_tmp_dir() _lowerCamelCase : List[str] = F""" ./examples/pytorch/text-classification/run_glue.py --num_cores=8 ./examples/pytorch/text-classification/run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --overwrite_output_dir --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --do_train --do_eval --debug tpu_metrics_debug --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --max_steps=10 --warmup_steps=2 --seed=42 --max_seq_length=128 """.split() with patch.object(__lowerCAmelCase ,"argv" ,__lowerCAmelCase ): _lowerCamelCase : Union[str, Any] = time() xla_spawn.main() _lowerCamelCase : List[str] = time() _lowerCamelCase : int = get_results(__lowerCAmelCase ) self.assertGreaterEqual(result["eval_accuracy"] ,0.75 ) # Assert that the script takes less than 500 seconds to make sure it doesn't hang. self.assertLess(end - start ,500 ) def _lowercase ( self: Dict ): '''simple docstring''' import xla_spawn _lowerCamelCase : Optional[Any] = "\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split() with patch.object(__lowerCAmelCase ,"argv" ,__lowerCAmelCase ): xla_spawn.main()
340
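The TPU test above drives `xla_spawn.main()` by temporarily replacing `sys.argv`. A generic sketch of that argv-patching trick for testing any script entry point without spawning a subprocess (`main` here is a stand-in for the script under test):

# Temporarily swap sys.argv so a script's main() sees test arguments.
import sys
from unittest.mock import patch


def main():
    # Stand-in for the example script's entry point.
    print("argv seen by main():", sys.argv)


testargs = ["prog", "--output_dir", "/tmp/out", "--max_steps", "10"]
with patch.object(sys, "argv", testargs):
    main()  # sees the fake argv while the patch is active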
"""simple docstring""" import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SegformerConfig, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _lowerCAmelCase : int = logging.get_logger(__name__) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=False ) -> List[str]: '''simple docstring''' _lowerCamelCase : Tuple = OrderedDict() for key, value in state_dict.items(): if encoder_only and not key.startswith("head" ): _lowerCamelCase : Tuple = "segformer.encoder." + key if key.startswith("backbone" ): _lowerCamelCase : Any = key.replace("backbone" , "segformer.encoder" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 _lowerCamelCase : int = key[key.find("patch_embed" ) + len("patch_embed" )] _lowerCamelCase : int = key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(_lowerCamelCase )-1}""" ) if "norm" in key: _lowerCamelCase : Optional[Any] = key.replace("norm" , "layer_norm" ) if "segformer.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 _lowerCamelCase : Dict = key[key.find("segformer.encoder.layer_norm" ) + len("segformer.encoder.layer_norm" )] _lowerCamelCase : Tuple = key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(_lowerCamelCase )-1}""" ) if "layer_norm1" in key: _lowerCamelCase : Union[str, Any] = key.replace("layer_norm1" , "layer_norm_1" ) if "layer_norm2" in key: _lowerCamelCase : int = key.replace("layer_norm2" , "layer_norm_2" ) if "block" in key: # replace for example block1 by block.0 _lowerCamelCase : Union[str, Any] = key[key.find("block" ) + len("block" )] _lowerCamelCase : Optional[Any] = key.replace(F"""block{idx}""" , F"""block.{int(_lowerCamelCase )-1}""" ) if "attn.q" in key: _lowerCamelCase : Optional[int] = key.replace("attn.q" , "attention.self.query" ) if "attn.proj" in key: _lowerCamelCase : List[str] = key.replace("attn.proj" , "attention.output.dense" ) if "attn" in key: _lowerCamelCase : Tuple = key.replace("attn" , "attention.self" ) if "fc1" in key: _lowerCamelCase : Optional[Any] = key.replace("fc1" , "dense1" ) if "fc2" in key: _lowerCamelCase : Dict = key.replace("fc2" , "dense2" ) if "linear_pred" in key: _lowerCamelCase : int = key.replace("linear_pred" , "classifier" ) if "linear_fuse" in key: _lowerCamelCase : str = key.replace("linear_fuse.conv" , "linear_fuse" ) _lowerCamelCase : Optional[Any] = key.replace("linear_fuse.bn" , "batch_norm" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 _lowerCamelCase : Union[str, Any] = key[key.find("linear_c" ) + len("linear_c" )] _lowerCamelCase : Optional[int] = key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(_lowerCamelCase )-1}""" ) if key.startswith("head" ): _lowerCamelCase : List[str] = key.replace("head" , "classifier" ) _lowerCamelCase : Union[str, Any] = value return new_state_dict def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) _lowerCamelCase : Optional[Any] = state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.weight""" ) _lowerCamelCase : Optional[Any] = 
state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.bias""" ) # next, add keys and values (in that order) to the state dict _lowerCamelCase : int = kv_weight[ : config.hidden_sizes[i], : ] _lowerCamelCase : int = kv_bias[: config.hidden_sizes[i]] _lowerCamelCase : Optional[int] = kv_weight[ config.hidden_sizes[i] :, : ] _lowerCamelCase : Optional[Any] = kv_bias[ config.hidden_sizes[i] : ] def lowerCamelCase_( ) -> Dict: '''simple docstring''' _lowerCamelCase : int = "http://images.cocodataset.org/val2017/000000039769.jpg" _lowerCamelCase : Union[str, Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ) return image @torch.no_grad() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' _lowerCamelCase : Any = SegformerConfig() _lowerCamelCase : int = False # set attributes based on model_name _lowerCamelCase : Any = "huggingface/label-files" if "segformer" in model_name: _lowerCamelCase : str = model_name[len("segformer." ) : len("segformer." ) + 2] if "ade" in model_name: _lowerCamelCase : str = 150 _lowerCamelCase : Dict = "ade20k-id2label.json" _lowerCamelCase : Dict = (1, 150, 128, 128) elif "city" in model_name: _lowerCamelCase : List[str] = 19 _lowerCamelCase : Tuple = "cityscapes-id2label.json" _lowerCamelCase : Tuple = (1, 19, 128, 128) else: raise ValueError(F"""Model {model_name} not supported""" ) elif "mit" in model_name: _lowerCamelCase : List[str] = True _lowerCamelCase : Tuple = model_name[4:6] _lowerCamelCase : Tuple = 1000 _lowerCamelCase : List[Any] = "imagenet-1k-id2label.json" _lowerCamelCase : List[Any] = (1, 1000) else: raise ValueError(F"""Model {model_name} not supported""" ) # set config attributes _lowerCamelCase : Optional[Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) ) _lowerCamelCase : List[str] = {int(_lowerCamelCase ): v for k, v in idalabel.items()} _lowerCamelCase : Optional[Any] = idalabel _lowerCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} if size == "b0": pass elif size == "b1": _lowerCamelCase : int = [64, 128, 320, 512] _lowerCamelCase : int = 256 elif size == "b2": _lowerCamelCase : Tuple = [64, 128, 320, 512] _lowerCamelCase : List[Any] = 768 _lowerCamelCase : Any = [3, 4, 6, 3] elif size == "b3": _lowerCamelCase : Tuple = [64, 128, 320, 512] _lowerCamelCase : Union[str, Any] = 768 _lowerCamelCase : Optional[Any] = [3, 4, 18, 3] elif size == "b4": _lowerCamelCase : str = [64, 128, 320, 512] _lowerCamelCase : Optional[Any] = 768 _lowerCamelCase : Dict = [3, 8, 27, 3] elif size == "b5": _lowerCamelCase : int = [64, 128, 320, 512] _lowerCamelCase : Tuple = 768 _lowerCamelCase : Tuple = [3, 6, 40, 3] else: raise ValueError(F"""Size {size} not supported""" ) # load image processor (only resize + normalize) _lowerCamelCase : Dict = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=_lowerCamelCase , align=_lowerCamelCase , do_random_crop=_lowerCamelCase ) # prepare image _lowerCamelCase : List[str] = prepare_img() _lowerCamelCase : Dict = image_processor(images=_lowerCamelCase , return_tensors="pt" ).pixel_values logger.info(F"""Converting model {model_name}...""" ) # load original state dict if encoder_only: _lowerCamelCase : Tuple = torch.load(_lowerCamelCase , map_location=torch.device("cpu" ) ) else: _lowerCamelCase : int = torch.load(_lowerCamelCase , map_location=torch.device("cpu" ) )["state_dict"] # rename keys _lowerCamelCase : str = rename_keys(_lowerCamelCase , 
encoder_only=_lowerCamelCase ) if not encoder_only: del state_dict["decode_head.conv_seg.weight"] del state_dict["decode_head.conv_seg.bias"] # key and value matrices need special treatment read_in_k_v(_lowerCamelCase , _lowerCamelCase ) # create HuggingFace model and load state dict if encoder_only: _lowerCamelCase : Tuple = False _lowerCamelCase : Optional[int] = SegformerForImageClassification(_lowerCamelCase ) else: _lowerCamelCase : List[str] = SegformerForSemanticSegmentation(_lowerCamelCase ) model.load_state_dict(_lowerCamelCase ) model.eval() # forward pass _lowerCamelCase : Any = model(_lowerCamelCase ) _lowerCamelCase : Dict = outputs.logits # set expected_slice based on model name # ADE20k checkpoints if model_name == "segformer.b0.512x512.ade.160k": _lowerCamelCase : str = torch.tensor( [ [[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]], [[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]], [[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]], ] ) elif model_name == "segformer.b1.512x512.ade.160k": _lowerCamelCase : Any = torch.tensor( [ [[-7.5_8_2_0, -8.7_2_3_1, -8.3_2_1_5], [-8.0_6_0_0, -1_0.3_5_2_9, -1_0.0_3_0_4], [-7.5_2_0_8, -9.4_1_0_3, -9.6_2_3_9]], [[-1_2.6_9_1_8, -1_3.8_9_9_4, -1_3.7_1_3_7], [-1_3.3_1_9_6, -1_5.7_5_2_3, -1_5.4_7_8_9], [-1_2.9_3_4_3, -1_4.8_7_5_7, -1_4.9_6_8_9]], [[-1_1.1_9_1_1, -1_1.9_4_2_1, -1_1.3_2_4_3], [-1_1.3_3_4_2, -1_3.6_8_3_9, -1_3.3_5_8_1], [-1_0.3_9_0_9, -1_2.1_8_3_2, -1_2.4_8_5_8]], ] ) elif model_name == "segformer.b2.512x512.ade.160k": _lowerCamelCase : int = torch.tensor( [ [[-1_1.8_1_7_3, -1_4.3_8_5_0, -1_6.3_1_2_8], [-1_4.5_6_4_8, -1_6.5_8_0_4, -1_8.6_5_6_8], [-1_4.7_2_2_3, -1_5.7_3_8_7, -1_8.4_2_1_8]], [[-1_5.7_2_9_0, -1_7.9_1_7_1, -1_9.4_4_2_3], [-1_8.3_1_0_5, -1_9.9_4_4_8, -2_1.4_6_6_1], [-1_7.9_2_9_6, -1_8.6_4_9_7, -2_0.7_9_1_0]], [[-1_5.0_7_8_3, -1_7.0_3_3_6, -1_8.2_7_8_9], [-1_6.8_7_7_1, -1_8.6_8_7_0, -2_0.1_6_1_2], [-1_6.2_4_5_4, -1_7.1_4_2_6, -1_9.5_0_5_5]], ] ) elif model_name == "segformer.b3.512x512.ade.160k": _lowerCamelCase : Optional[Any] = torch.tensor( [ [[-9.0_8_7_8, -1_0.2_0_8_1, -1_0.1_8_9_1], [-9.3_1_4_4, -1_0.7_9_4_1, -1_0.9_8_4_3], [-9.2_2_9_4, -1_0.3_8_5_5, -1_0.5_7_0_4]], [[-1_2.2_3_1_6, -1_3.9_0_6_8, -1_3.6_1_0_2], [-1_2.9_1_6_1, -1_4.3_7_0_2, -1_4.3_2_3_5], [-1_2.5_2_3_3, -1_3.7_1_7_4, -1_3.7_9_3_2]], [[-1_4.6_2_7_5, -1_5.2_4_9_0, -1_4.9_7_2_7], [-1_4.3_4_0_0, -1_5.9_6_8_7, -1_6.2_8_2_7], [-1_4.1_4_8_4, -1_5.4_0_3_3, -1_5.8_9_3_7]], ] ) elif model_name == "segformer.b4.512x512.ade.160k": _lowerCamelCase : List[str] = torch.tensor( [ [[-1_2.3_1_4_4, -1_3.2_4_4_7, -1_4.0_8_0_2], [-1_3.3_6_1_4, -1_4.5_8_1_6, -1_5.6_1_1_7], [-1_3.3_3_4_0, -1_4.4_4_3_3, -1_6.2_2_1_9]], [[-1_9.2_7_8_1, -2_0.4_1_2_8, -2_0.7_5_0_6], [-2_0.6_1_5_3, -2_1.6_5_6_6, -2_2.0_9_9_8], [-1_9.9_8_0_0, -2_1.0_4_3_0, -2_2.1_4_9_4]], [[-1_8.8_7_3_9, -1_9.7_8_0_4, -2_1.1_8_3_4], [-2_0.1_2_3_3, -2_1.6_7_6_5, -2_3.2_9_4_4], [-2_0.0_3_1_5, -2_1.2_6_4_1, -2_3.6_9_4_4]], ] ) elif model_name == "segformer.b5.640x640.ade.160k": _lowerCamelCase : Any = torch.tensor( [ [[-9.5_5_2_4, -1_2.0_8_3_5, -1_1.7_3_4_8], [-1_0.5_2_2_9, -1_3.6_4_4_6, -1_4.5_6_6_2], [-9.5_8_4_2, -1_2.8_8_5_1, -1_3.9_4_1_4]], [[-1_5.3_4_3_2, -1_7.5_3_2_3, -1_7.0_8_1_8], [-1_6.3_3_3_0, -1_8.9_2_5_5, -1_9.2_1_0_1], [-1_5.1_3_4_0, -1_7.7_8_4_8, -1_8.3_9_7_1]], 
[[-1_2.6_0_7_2, -1_4.9_4_8_6, -1_4.6_6_3_1], [-1_3.7_6_2_9, -1_7.0_9_0_7, -1_7.7_7_4_5], [-1_2.7_8_9_9, -1_6.1_6_9_5, -1_7.1_6_7_1]], ] ) # Cityscapes checkpoints elif model_name == "segformer.b0.1024x1024.city.160k": _lowerCamelCase : Dict = torch.tensor( [ [[-1_1.9_2_9_5, -1_3.4_0_5_7, -1_4.8_1_0_6], [-1_3.3_4_3_1, -1_4.8_1_7_9, -1_5.3_7_8_1], [-1_4.2_8_3_6, -1_5.5_9_4_2, -1_6.1_5_8_8]], [[-1_1.4_9_0_6, -1_2.8_0_6_7, -1_3.6_5_6_4], [-1_3.1_1_8_9, -1_4.0_5_0_0, -1_4.1_5_4_3], [-1_3.8_7_4_8, -1_4.5_1_3_6, -1_4.8_7_8_9]], [[0.5_3_7_4, 0.1_0_6_7, -0.4_7_4_2], [0.1_1_4_1, -0.2_2_5_5, -0.7_0_9_9], [-0.3_0_0_0, -0.5_9_2_4, -1.3_1_0_5]], ] ) elif model_name == "segformer.b0.512x1024.city.160k": _lowerCamelCase : Optional[int] = torch.tensor( [ [[-7.8_2_1_7, -9.8_7_6_7, -1_0.1_7_1_7], [-9.4_4_3_8, -1_0.9_0_5_8, -1_1.4_0_4_7], [-9.7_9_3_9, -1_2.3_4_9_5, -1_2.1_0_7_9]], [[-7.1_5_1_4, -9.5_3_3_6, -1_0.0_8_6_0], [-9.7_7_7_6, -1_1.6_8_2_2, -1_1.8_4_3_9], [-1_0.1_4_1_1, -1_2.7_6_5_5, -1_2.8_9_7_2]], [[0.3_0_2_1, 0.0_8_0_5, -0.2_3_1_0], [-0.0_3_2_8, -0.1_6_0_5, -0.2_7_1_4], [-0.1_4_0_8, -0.5_4_7_7, -0.6_9_7_6]], ] ) elif model_name == "segformer.b0.640x1280.city.160k": _lowerCamelCase : Tuple = torch.tensor( [ [ [-1.13_72e01, -1.27_87e01, -1.34_77e01], [-1.25_36e01, -1.41_94e01, -1.44_09e01], [-1.32_17e01, -1.48_88e01, -1.53_27e01], ], [ [-1.47_91e01, -1.71_22e01, -1.82_77e01], [-1.71_63e01, -1.91_92e01, -1.95_33e01], [-1.78_97e01, -1.99_91e01, -2.03_15e01], ], [ [7.67_23e-01, 4.19_21e-01, -7.78_78e-02], [4.77_72e-01, 9.55_57e-03, -2.80_82e-01], [3.60_32e-01, -2.48_26e-01, -5.11_68e-01], ], ] ) elif model_name == "segformer.b0.768x768.city.160k": _lowerCamelCase : Union[str, Any] = torch.tensor( [ [[-9.4_9_5_9, -1_1.3_0_8_7, -1_1.7_4_7_9], [-1_1.0_0_2_5, -1_2.6_5_4_0, -1_2.3_3_1_9], [-1_1.4_0_6_4, -1_3.0_4_8_7, -1_2.9_9_0_5]], [[-9.8_9_0_5, -1_1.3_0_8_4, -1_2.0_8_5_4], [-1_1.1_7_2_6, -1_2.7_6_9_8, -1_2.9_5_8_3], [-1_1.5_9_8_5, -1_3.3_2_7_8, -1_4.1_7_7_4]], [[0.2_2_1_3, 0.0_1_9_2, -0.2_4_6_6], [-0.1_7_3_1, -0.4_2_1_3, -0.4_8_7_4], [-0.3_1_2_6, -0.6_5_4_1, -1.1_3_8_9]], ] ) elif model_name == "segformer.b1.1024x1024.city.160k": _lowerCamelCase : List[Any] = torch.tensor( [ [[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]], [[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]], [[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]], ] ) elif model_name == "segformer.b2.1024x1024.city.160k": _lowerCamelCase : Tuple = torch.tensor( [ [[-1_6.0_9_7_6, -1_6.4_8_5_6, -1_7.3_9_6_2], [-1_6.6_2_3_4, -1_9.0_3_4_2, -1_9.7_6_8_5], [-1_6.0_9_0_0, -1_8.0_6_6_1, -1_9.1_1_8_0]], [[-1_8.4_7_5_0, -1_8.8_4_8_8, -1_9.5_0_7_4], [-1_9.4_0_3_0, -2_2.1_5_7_0, -2_2.5_9_7_7], [-1_9.1_1_9_1, -2_0.8_4_8_6, -2_2.3_7_8_3]], [[-4.5_1_7_8, -5.5_0_3_7, -6.5_1_0_9], [-5.0_8_8_4, -7.2_1_7_4, -8.0_3_3_4], [-4.4_1_5_6, -5.8_1_1_7, -7.2_9_7_0]], ] ) elif model_name == "segformer.b3.1024x1024.city.160k": _lowerCamelCase : Any = torch.tensor( [ [[-1_4.2_0_8_1, -1_4.4_7_3_2, -1_4.1_9_7_7], [-1_4.5_8_6_7, -1_6.4_4_2_3, -1_6.6_3_5_6], [-1_3.4_4_4_1, -1_4.9_6_8_5, -1_6.8_6_9_6]], [[-1_4.4_5_7_6, -1_4.7_0_7_3, -1_5.0_4_5_1], [-1_5.0_8_1_6, -1_7.6_2_3_7, -1_7.9_8_7_3], [-1_4.4_2_1_3, -1_6.0_1_9_9, -1_8.5_9_9_2]], [[-4.7_3_4_9, -4.9_5_8_8, -5.0_9_6_6], [-4.3_2_1_0, -6.9_3_2_5, -7.2_5_9_1], [-3.4_3_1_2, -4.7_4_8_4, -7.1_9_1_7]], ] ) 
elif model_name == "segformer.b4.1024x1024.city.160k": _lowerCamelCase : List[str] = torch.tensor( [ [[-1_1.7_7_3_7, -1_1.9_5_2_6, -1_1.3_2_7_3], [-1_3.6_6_9_2, -1_4.4_5_7_4, -1_3.8_8_7_8], [-1_3.8_9_3_7, -1_4.6_9_2_4, -1_5.9_3_4_5]], [[-1_4.6_7_0_6, -1_4.5_3_3_0, -1_4.1_3_0_6], [-1_6.1_5_0_2, -1_6.8_1_8_0, -1_6.4_2_6_9], [-1_6.8_3_3_8, -1_7.8_9_3_9, -2_0.1_7_4_6]], [[1.0_4_9_1, 0.8_2_8_9, 1.0_3_1_0], [1.1_0_4_4, 0.5_2_1_9, 0.8_0_5_5], [1.0_8_9_9, 0.6_9_2_6, 0.5_5_9_0]], ] ) elif model_name == "segformer.b5.1024x1024.city.160k": _lowerCamelCase : str = torch.tensor( [ [[-1_2.5_6_4_1, -1_3.4_7_7_7, -1_3.0_6_8_4], [-1_3.9_5_8_7, -1_5.8_9_8_3, -1_6.6_5_5_7], [-1_3.3_1_0_9, -1_5.7_3_5_0, -1_6.3_1_4_1]], [[-1_4.7_0_7_4, -1_5.4_3_5_2, -1_4.5_9_4_4], [-1_6.6_3_5_3, -1_8.1_6_6_3, -1_8.6_1_2_0], [-1_5.1_7_0_2, -1_8.0_3_2_9, -1_8.1_5_4_7]], [[-1.7_9_9_0, -2.0_9_5_1, -1.7_7_8_4], [-2.6_3_9_7, -3.8_2_4_5, -3.9_6_8_6], [-1.5_2_6_4, -2.8_1_2_6, -2.9_3_1_6]], ] ) else: _lowerCamelCase : Dict = logits.argmax(-1 ).item() print("Predicted class:" , model.config.idalabel[predicted_class_idx] ) # verify logits if not encoder_only: assert logits.shape == expected_shape assert torch.allclose(logits[0, :3, :3, :3] , _lowerCamelCase , atol=1e-2 ) # finally, save model and image processor logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase ) model.save_pretrained(_lowerCamelCase ) image_processor.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": _lowerCAmelCase : str = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''segformer.b0.512x512.ade.160k''', type=str, help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) _lowerCAmelCase : str = parser.parse_args() convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
340
1
def solution(limit: int = 1_000_000) -> int:
    """Return the starting number below `limit` that produces the longest Collatz chain."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for input_num in range(2, limit):
        counter = 0
        number = input_num
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input_num not in counters:
            counters[input_num] = counter
        if counter > pre_counter:
            largest_number = input_num
            pre_counter = counter
    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
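# Sanity check (hypothetical usage, not part of the script above): for the classic
# Project Euler #14 limit of one million, the longest Collatz chain is known to
# start at 837799, so solution(1_000_000) should return 837799.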
48
import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors SCREAMING_SNAKE_CASE__ : Dict = logging.getLogger(__name__) class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Optional[int] = """sequence-classification""" def __init__( self , UpperCamelCase__ ) -> List[Any]: if type(UpperCamelCase__ ) == dict: lowerCamelCase : int = Namespace(**UpperCamelCase__ ) lowerCamelCase : str = glue_output_modes[hparams.task] lowerCamelCase : int = glue_tasks_num_labels[hparams.task] super().__init__(UpperCamelCase__ , UpperCamelCase__ , self.mode ) def _lowercase ( self , **UpperCamelCase__ ) -> Tuple: return self.model(**UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple: lowerCamelCase : Union[str, Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: lowerCamelCase : List[str] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None lowerCamelCase : Optional[int] = self(**UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = outputs[0] lowerCamelCase : str = self.trainer.lr_schedulers[0]["scheduler"] lowerCamelCase : Optional[int] = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def _lowercase ( self ) -> str: lowerCamelCase : Any = self.hparams lowerCamelCase : Union[str, Any] = processors[args.task]() lowerCamelCase : Optional[int] = processor.get_labels() for mode in ["train", "dev"]: lowerCamelCase : Optional[Any] = self._feature_file(UpperCamelCase__ ) if os.path.exists(UpperCamelCase__ ) and not args.overwrite_cache: logger.info("Loading features from cached file %s" , UpperCamelCase__ ) else: logger.info("Creating features from dataset file at %s" , args.data_dir ) lowerCamelCase : List[str] = ( processor.get_dev_examples(args.data_dir ) if mode == "dev" else processor.get_train_examples(args.data_dir ) ) lowerCamelCase : Dict = convert_examples_to_features( UpperCamelCase__ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info("Saving features into cached file %s" , UpperCamelCase__ ) torch.save(UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False ) -> DataLoader: lowerCamelCase : str = "dev" if mode == "test" else mode lowerCamelCase : int = self._feature_file(UpperCamelCase__ ) logger.info("Loading features from cached file %s" , UpperCamelCase__ ) lowerCamelCase : str = torch.load(UpperCamelCase__ ) lowerCamelCase : List[str] = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) lowerCamelCase : str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) lowerCamelCase : List[str] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) if self.hparams.glue_output_mode == "classification": lowerCamelCase : Any = torch.tensor([f.label for f in features] , dtype=torch.long ) elif self.hparams.glue_output_mode 
== "regression": lowerCamelCase : Union[str, Any] = torch.tensor([f.label for f in features] , dtype=torch.float ) return DataLoader( TensorDataset(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , batch_size=UpperCamelCase__ , shuffle=UpperCamelCase__ , ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]: lowerCamelCase : Dict = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: lowerCamelCase : Tuple = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None lowerCamelCase : Dict = self(**UpperCamelCase__ ) lowerCamelCase , lowerCamelCase : Any = outputs[:2] lowerCamelCase : Union[str, Any] = logits.detach().cpu().numpy() lowerCamelCase : Optional[Any] = inputs["labels"].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def _lowercase ( self , UpperCamelCase__ ) -> tuple: lowerCamelCase : Union[str, Any] = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item() lowerCamelCase : Optional[int] = np.concatenate([x["pred"] for x in outputs] , axis=0 ) if self.hparams.glue_output_mode == "classification": lowerCamelCase : Union[str, Any] = np.argmax(UpperCamelCase__ , axis=1 ) elif self.hparams.glue_output_mode == "regression": lowerCamelCase : str = np.squeeze(UpperCamelCase__ ) lowerCamelCase : List[Any] = np.concatenate([x["target"] for x in outputs] , axis=0 ) lowerCamelCase : List[str] = [[] for _ in range(out_label_ids.shape[0] )] lowerCamelCase : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )] lowerCamelCase : Dict = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , UpperCamelCase__ , UpperCamelCase__ )} lowerCamelCase : List[str] = dict(results.items() ) lowerCamelCase : Optional[int] = results return ret, preds_list, out_label_list def _lowercase ( self , UpperCamelCase__ ) -> dict: lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = self._eval_end(UpperCamelCase__ ) lowerCamelCase : str = ret["log"] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def _lowercase ( self , UpperCamelCase__ ) -> dict: lowerCamelCase , lowerCamelCase , lowerCamelCase : str = self._eval_end(UpperCamelCase__ ) lowerCamelCase : str = ret["log"] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def _lowercase ( UpperCamelCase__ , UpperCamelCase__ ) -> int: BaseTransformer.add_model_specific_args(UpperCamelCase__ , UpperCamelCase__ ) parser.add_argument( "--max_seq_length" , default=128 , type=UpperCamelCase__ , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." 
) , ) parser.add_argument( "--task" , default="" , type=UpperCamelCase__ , required=UpperCamelCase__ , help="The GLUE task to run" , ) parser.add_argument( "--gpus" , default=0 , type=UpperCamelCase__ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , ) parser.add_argument( "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" ) return parser def A ( ) -> int: lowerCamelCase : int = argparse.ArgumentParser() add_generic_args(_SCREAMING_SNAKE_CASE ,os.getcwd() ) lowerCamelCase : str = GLUETransformer.add_model_specific_args(_SCREAMING_SNAKE_CASE ,os.getcwd() ) lowerCamelCase : str = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: lowerCamelCase : int = os.path.join( "./results" ,f'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' ,) os.makedirs(args.output_dir ) lowerCamelCase : int = GLUETransformer(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Dict = generic_train(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # Optionally, predict on dev set and write to output_dir if args.do_predict: lowerCamelCase : Optional[int] = sorted(glob.glob(os.path.join(args.output_dir ,"checkpoint-epoch=*.ckpt" ) ,recursive=_SCREAMING_SNAKE_CASE ) ) lowerCamelCase : Tuple = model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
48
1
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    # Rename the language-model head weight and re-save the checkpoint in HF layout.
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
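# Hypothetical invocation (the script filename below is an assumption; the checkpoint
# names come from the loop above):
#   python convert_dialogpt_checkpoint.py --dialogpt_path /path/to/dialogpt/checkpoints
# It expects small_ft.pkl, medium_ft.pkl and large_ft.pkl under --dialogpt_path and
# writes ./DialoGPT-{small,medium,large}/pytorch_model.bin with the renamed lm_head key.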
355
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class A ( unittest.TestCase ): """simple docstring""" def snake_case__ ( self : List[Any],lowercase_ : str )-> List[Any]: '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result['bs'],model_result['ss'] ): A__ = model_result['result'][batch_size][sequence_length] self.assertIsNotNone(lowercase_ ) def snake_case__ ( self : Dict )-> List[str]: '''simple docstring''' A__ = 'sshleifer/tiny-gpt2' A__ = PyTorchBenchmarkArguments( models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,) A__ = PyTorchBenchmark(lowercase_ ) A__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def snake_case__ ( self : Dict )-> List[str]: '''simple docstring''' A__ = 'sgugger/tiny-distilbert-classification' A__ = PyTorchBenchmarkArguments( models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,only_pretrain_model=lowercase_,) A__ = PyTorchBenchmark(lowercase_ ) A__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def snake_case__ ( self : List[Any] )-> Any: '''simple docstring''' A__ = 'sshleifer/tiny-gpt2' A__ = PyTorchBenchmarkArguments( models=[MODEL_ID],training=lowercase_,inference=lowercase_,torchscript=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,) A__ = PyTorchBenchmark(lowercase_ ) A__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == 'cpu','Cant do half precision' ) def snake_case__ ( self : Any )-> Dict: '''simple docstring''' A__ = 'sshleifer/tiny-gpt2' A__ = PyTorchBenchmarkArguments( models=[MODEL_ID],training=lowercase_,inference=lowercase_,fpaa=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,) A__ = PyTorchBenchmark(lowercase_ ) A__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def snake_case__ ( self : Any )-> Optional[Any]: '''simple docstring''' A__ = 'sshleifer/tiny-gpt2' A__ = AutoConfig.from_pretrained(lowercase_ ) # set architectures equal to `None` A__ = None A__ = PyTorchBenchmarkArguments( models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,) A__ = PyTorchBenchmark(lowercase_,configs=[config] ) A__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def snake_case__ ( self : Union[str, Any] )-> int: '''simple docstring''' A__ = 'sshleifer/tiny-gpt2' A__ = PyTorchBenchmarkArguments( models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,) A__ = PyTorchBenchmark(lowercase_ ) A__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) 
self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == 'cpu','Can\'t do half precision' ) def snake_case__ ( self : List[Any] )-> Dict: '''simple docstring''' A__ = 'sshleifer/tiny-gpt2' A__ = PyTorchBenchmarkArguments( models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],fpaa=lowercase_,multi_process=lowercase_,) A__ = PyTorchBenchmark(lowercase_ ) A__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def snake_case__ ( self : int )-> Optional[int]: '''simple docstring''' A__ = 'sshleifer/tiny-gpt2' A__ = AutoConfig.from_pretrained(lowercase_ ) A__ = PyTorchBenchmarkArguments( models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,) A__ = PyTorchBenchmark(lowercase_,configs=[config] ) A__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def snake_case__ ( self : List[Any] )-> Any: '''simple docstring''' A__ = 'sshleifer/tinier_bart' A__ = AutoConfig.from_pretrained(lowercase_ ) A__ = PyTorchBenchmarkArguments( models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,) A__ = PyTorchBenchmark(lowercase_,configs=[config] ) A__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def snake_case__ ( self : List[str] )-> List[str]: '''simple docstring''' A__ = 'sshleifer/tiny-gpt2' A__ = AutoConfig.from_pretrained(lowercase_ ) A__ = PyTorchBenchmarkArguments( models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,) A__ = PyTorchBenchmark(lowercase_,configs=[config] ) A__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def snake_case__ ( self : int )-> Union[str, Any]: '''simple docstring''' A__ = 'sshleifer/tinier_bart' A__ = AutoConfig.from_pretrained(lowercase_ ) A__ = PyTorchBenchmarkArguments( models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,) A__ = PyTorchBenchmark(lowercase_,configs=[config] ) A__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def snake_case__ ( self : Optional[Any] )-> Tuple: '''simple docstring''' A__ = 'sshleifer/tiny-gpt2' with tempfile.TemporaryDirectory() as tmp_dir: A__ = PyTorchBenchmarkArguments( models=[MODEL_ID],training=lowercase_,inference=lowercase_,save_to_csv=lowercase_,sequence_lengths=[8],batch_sizes=[1],inference_time_csv_file=os.path.join(lowercase_,'inf_time.csv' ),train_memory_csv_file=os.path.join(lowercase_,'train_mem.csv' ),inference_memory_csv_file=os.path.join(lowercase_,'inf_mem.csv' ),train_time_csv_file=os.path.join(lowercase_,'train_time.csv' ),env_info_csv_file=os.path.join(lowercase_,'env.csv' ),multi_process=lowercase_,) A__ = PyTorchBenchmark(lowercase_ ) benchmark.run() self.assertTrue(Path(os.path.join(lowercase_,'inf_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(lowercase_,'train_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(lowercase_,'inf_mem.csv' ) ).exists() ) 
self.assertTrue(Path(os.path.join(lowercase_,'train_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(lowercase_,'env.csv' ) ).exists() ) def snake_case__ ( self : Tuple )-> str: '''simple docstring''' A__ = 'sshleifer/tiny-gpt2' def _check_summary_is_not_empty(lowercase_ : Optional[Any] ): self.assertTrue(hasattr(lowercase_,'sequential' ) ) self.assertTrue(hasattr(lowercase_,'cumulative' ) ) self.assertTrue(hasattr(lowercase_,'current' ) ) self.assertTrue(hasattr(lowercase_,'total' ) ) with tempfile.TemporaryDirectory() as tmp_dir: A__ = PyTorchBenchmarkArguments( models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],log_filename=os.path.join(lowercase_,'log.txt' ),log_print=lowercase_,trace_memory_line_by_line=lowercase_,multi_process=lowercase_,) A__ = PyTorchBenchmark(lowercase_ ) A__ = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(lowercase_,'log.txt' ) ).exists() )
282
0
import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs _SCREAMING_SNAKE_CASE : str = imread(r'''digital_image_processing/image_data/lena_small.jpg''') _SCREAMING_SNAKE_CASE : int = cvtColor(img, COLOR_BGR2GRAY) def UpperCAmelCase_ ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = cn.convert_to_negative(_A ) # assert negative_img array for at least one True assert negative_img.any() def UpperCAmelCase_ ( ): '''simple docstring''' with Image.open('''digital_image_processing/image_data/lena_small.jpg''' ) as img: # Work around assertion for response assert str(cc.change_contrast(_A , 1_10 ) ).startswith( '''<PIL.Image.Image image mode=RGB size=100x100 at''' ) def UpperCAmelCase_ ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def UpperCAmelCase_ ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0 ) # assert ambiguous array for all == True assert canny_img.all() SCREAMING_SNAKE_CASE__ = canny.canny(_A ) # assert canny array for at least one True assert canny_array.any() def UpperCAmelCase_ ( ): '''simple docstring''' assert gg.gaussian_filter(_A , 5 , sigma=0.9 ).all() def UpperCAmelCase_ ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = array([[0.2_5, 0.5, 0.2_5], [0.5, -3, 0.5], [0.2_5, 0.5, 0.2_5]] ) SCREAMING_SNAKE_CASE__ = conv.img_convolve(_A , _A ).astype(_A ) assert res.any() def UpperCAmelCase_ ( ): '''simple docstring''' assert med.median_filter(_A , 3 ).any() def UpperCAmelCase_ ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = sob.sobel_filter(_A ) assert grad.any() and theta.any() def UpperCAmelCase_ ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = sp.make_sepia(_A , 20 ) assert sepia.all() def UpperCAmelCase_ ( _A = "digital_image_processing/image_data/lena_small.jpg" ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = bs.Burkes(imread(_A , 1 ) , 1_20 ) burkes.process() assert burkes.output_img.any() def UpperCAmelCase_ ( _A = "digital_image_processing/image_data/lena_small.jpg" , ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = rs.NearestNeighbour(imread(_A , 1 ) , 4_00 , 2_00 ) nn.process() assert nn.output.any() def UpperCAmelCase_ ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = '''digital_image_processing/image_data/lena.jpg''' # Reading the image and converting it to grayscale. 
SCREAMING_SNAKE_CASE__ = imread(_A , 0 ) # Test for get_neighbors_pixel function() return not None SCREAMING_SNAKE_CASE__ = 0 SCREAMING_SNAKE_CASE__ = 0 SCREAMING_SNAKE_CASE__ = image[x_coordinate][y_coordinate] SCREAMING_SNAKE_CASE__ = lbp.get_neighbors_pixel( _A , _A , _A , _A ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image SCREAMING_SNAKE_CASE__ = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): SCREAMING_SNAKE_CASE__ = lbp.local_binary_value(_A , _A , _A ) assert lbp_image.any()
314
import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated _SCREAMING_SNAKE_CASE : Optional[int] = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test''']) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ _SCREAMING_SNAKE_CASE : Any = '''https://storage.googleapis.com/cvdf-datasets/mnist/''' def UpperCAmelCase_ ( _A ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' ) return numpy.frombuffer(bytestream.read(4 ) , dtype=_A )[0] @deprecated(_A , '''Please use tf.data to implement this functionality.''' ) def UpperCAmelCase_ ( _A ): '''simple docstring''' print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=_A ) as bytestream: SCREAMING_SNAKE_CASE__ = _readaa(_A ) if magic != 20_51: raise ValueError( '''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) ) SCREAMING_SNAKE_CASE__ = _readaa(_A ) SCREAMING_SNAKE_CASE__ = _readaa(_A ) SCREAMING_SNAKE_CASE__ = _readaa(_A ) SCREAMING_SNAKE_CASE__ = bytestream.read(rows * cols * num_images ) SCREAMING_SNAKE_CASE__ = numpy.frombuffer(_A , dtype=numpy.uinta ) SCREAMING_SNAKE_CASE__ = data.reshape(_A , _A , _A , 1 ) return data @deprecated(_A , '''Please use tf.one_hot on tensors.''' ) def UpperCAmelCase_ ( _A , _A ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = labels_dense.shape[0] SCREAMING_SNAKE_CASE__ = numpy.arange(_A ) * num_classes SCREAMING_SNAKE_CASE__ = numpy.zeros((num_labels, num_classes) ) SCREAMING_SNAKE_CASE__ = 1 return labels_one_hot @deprecated(_A , '''Please use tf.data to implement this functionality.''' ) def UpperCAmelCase_ ( _A , _A=False , _A=10 ): '''simple docstring''' print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=_A ) as bytestream: SCREAMING_SNAKE_CASE__ = _readaa(_A ) if magic != 20_49: raise ValueError( '''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) ) SCREAMING_SNAKE_CASE__ = _readaa(_A ) SCREAMING_SNAKE_CASE__ = bytestream.read(_A ) SCREAMING_SNAKE_CASE__ = numpy.frombuffer(_A , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(_A , _A ) return labels class UpperCAmelCase__ : """simple docstring""" @deprecated( __lowerCamelCase , '''Please use alternatives such as official/mnist/_DataSet.py''' ''' from tensorflow/models.''' , ) def __init__( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict=False , __lowerCamelCase : Dict=False , __lowerCamelCase : List[str]=dtypes.floataa , __lowerCamelCase : List[str]=True , __lowerCamelCase : Any=None , ) -> List[Any]: SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = random_seed.get_seed(__lowerCamelCase ) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda ) SCREAMING_SNAKE_CASE__ = dtypes.as_dtype(__lowerCamelCase ).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype ) if fake_data: SCREAMING_SNAKE_CASE__ = 1_0000 SCREAMING_SNAKE_CASE__ = one_hot else: assert ( images.shape[0] == labels.shape[0] ), f'''images.shape: {images.shape} labels.shape: {labels.shape}''' SCREAMING_SNAKE_CASE__ = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] 
== 1 SCREAMING_SNAKE_CASE__ = images.reshape( images.shape[0] , images.shape[1] * images.shape[2] ) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. SCREAMING_SNAKE_CASE__ = images.astype(numpy.floataa ) SCREAMING_SNAKE_CASE__ = numpy.multiply(__lowerCamelCase , 1.0 / 255.0 ) SCREAMING_SNAKE_CASE__ = images SCREAMING_SNAKE_CASE__ = labels SCREAMING_SNAKE_CASE__ = 0 SCREAMING_SNAKE_CASE__ = 0 @property def lowercase_ ( self : Tuple ) -> List[str]: return self._images @property def lowercase_ ( self : List[Any] ) -> Tuple: return self._labels @property def lowercase_ ( self : Tuple ) -> Tuple: return self._num_examples @property def lowercase_ ( self : Optional[int] ) -> int: return self._epochs_completed def lowercase_ ( self : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Union[str, Any]=True ) -> str: if fake_data: SCREAMING_SNAKE_CASE__ = [1] * 784 SCREAMING_SNAKE_CASE__ = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(__lowerCamelCase )], [fake_label for _ in range(__lowerCamelCase )], ) SCREAMING_SNAKE_CASE__ = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: SCREAMING_SNAKE_CASE__ = numpy.arange(self._num_examples ) numpy.random.shuffle(__lowerCamelCase ) SCREAMING_SNAKE_CASE__ = self.images[perma] SCREAMING_SNAKE_CASE__ = self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch SCREAMING_SNAKE_CASE__ = self._num_examples - start SCREAMING_SNAKE_CASE__ = self._images[start : self._num_examples] SCREAMING_SNAKE_CASE__ = self._labels[start : self._num_examples] # Shuffle the data if shuffle: SCREAMING_SNAKE_CASE__ = numpy.arange(self._num_examples ) numpy.random.shuffle(__lowerCamelCase ) SCREAMING_SNAKE_CASE__ = self.images[perm] SCREAMING_SNAKE_CASE__ = self.labels[perm] # Start next epoch SCREAMING_SNAKE_CASE__ = 0 SCREAMING_SNAKE_CASE__ = batch_size - rest_num_examples SCREAMING_SNAKE_CASE__ = self._index_in_epoch SCREAMING_SNAKE_CASE__ = self._images[start:end] SCREAMING_SNAKE_CASE__ = self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) , axis=0 ), numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ), ) else: self._index_in_epoch += batch_size SCREAMING_SNAKE_CASE__ = self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(_A , '''Please write your own downloading logic.''' ) def UpperCAmelCase_ ( _A , _A , _A ): '''simple docstring''' if not gfile.Exists(_A ): gfile.MakeDirs(_A ) SCREAMING_SNAKE_CASE__ = os.path.join(_A , _A ) if not gfile.Exists(_A ): urllib.request.urlretrieve(_A , _A ) # noqa: S310 with gfile.GFile(_A ) as f: SCREAMING_SNAKE_CASE__ = f.size() print('''Successfully downloaded''' , _A , _A , '''bytes.''' ) return filepath @deprecated( _A , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' ) def UpperCAmelCase_ ( _A , _A=False , _A=False , _A=dtypes.floataa , _A=True , _A=50_00 , _A=None , _A=DEFAULT_SOURCE_URL , ): '''simple docstring''' if fake_data: def fake(): return _DataSet( [] , [] , fake_data=_A , one_hot=_A , dtype=_A , seed=_A ) SCREAMING_SNAKE_CASE__ = fake() SCREAMING_SNAKE_CASE__ = fake() SCREAMING_SNAKE_CASE__ = fake() return _Datasets(train=_A , validation=_A , test=_A ) if not source_url: # empty string check SCREAMING_SNAKE_CASE__ = DEFAULT_SOURCE_URL 
SCREAMING_SNAKE_CASE__ = '''train-images-idx3-ubyte.gz''' SCREAMING_SNAKE_CASE__ = '''train-labels-idx1-ubyte.gz''' SCREAMING_SNAKE_CASE__ = '''t10k-images-idx3-ubyte.gz''' SCREAMING_SNAKE_CASE__ = '''t10k-labels-idx1-ubyte.gz''' SCREAMING_SNAKE_CASE__ = _maybe_download( _A , _A , source_url + train_images_file ) with gfile.Open(_A , '''rb''' ) as f: SCREAMING_SNAKE_CASE__ = _extract_images(_A ) SCREAMING_SNAKE_CASE__ = _maybe_download( _A , _A , source_url + train_labels_file ) with gfile.Open(_A , '''rb''' ) as f: SCREAMING_SNAKE_CASE__ = _extract_labels(_A , one_hot=_A ) SCREAMING_SNAKE_CASE__ = _maybe_download( _A , _A , source_url + test_images_file ) with gfile.Open(_A , '''rb''' ) as f: SCREAMING_SNAKE_CASE__ = _extract_images(_A ) SCREAMING_SNAKE_CASE__ = _maybe_download( _A , _A , source_url + test_labels_file ) with gfile.Open(_A , '''rb''' ) as f: SCREAMING_SNAKE_CASE__ = _extract_labels(_A , one_hot=_A ) if not 0 <= validation_size <= len(_A ): SCREAMING_SNAKE_CASE__ = ( '''Validation size should be between 0 and ''' F'''{len(_A )}. Received: {validation_size}.''' ) raise ValueError(_A ) SCREAMING_SNAKE_CASE__ = train_images[:validation_size] SCREAMING_SNAKE_CASE__ = train_labels[:validation_size] SCREAMING_SNAKE_CASE__ = train_images[validation_size:] SCREAMING_SNAKE_CASE__ = train_labels[validation_size:] SCREAMING_SNAKE_CASE__ = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed} SCREAMING_SNAKE_CASE__ = _DataSet(_A , _A , **_A ) SCREAMING_SNAKE_CASE__ = _DataSet(_A , _A , **_A ) SCREAMING_SNAKE_CASE__ = _DataSet(_A , _A , **_A ) return _Datasets(train=_A , validation=_A , test=_A )
314
1
def odd_even_transposition(arr: list) -> list:
    """Sort `arr` in place with odd-even transposition sort and return it."""
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
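# Note (added context): odd-even transposition sort is O(n^2) sequentially, but each
# inner pass compares disjoint (i, i + 1) pairs, so with n/2 parallel comparators it
# sorts in n rounds; hence its other name, brick sort.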
61
def nor_gate(input_1: int, input_2: int) -> int:
    """Return 1 only when both inputs are 0 (logical NOR), else 0."""
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|    0    |    0    |   {nor_gate(0, 0)}    |")
    print(f"|    0    |    1    |   {nor_gate(0, 1)}    |")
    print(f"|    1    |    0    |   {nor_gate(1, 0)}    |")
    print(f"|    1    |    1    |   {nor_gate(1, 1)}    |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
61
1
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    """Return all primes up to and including `num` using the sieve of Eratosthenes."""
    if num <= 0:
        message = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(message)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start to False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input('Enter a positive integer: ').strip())))
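# Example (hypothetical usage): prime_sieve(10) returns [2, 3, 5, 7] and
# prime_sieve(25)[-1] is 23; the sieve runs in O(n log log n) time.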
6
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class SCREAMING_SNAKE_CASE__ ( lowercase__ , unittest.TestCase ): snake_case__ : Optional[Any] = TextToVideoSDPipeline snake_case__ : Optional[int] = TEXT_TO_IMAGE_PARAMS snake_case__ : str = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. snake_case__ : Optional[Any] = frozenset( [ '''num_inference_steps''', '''generator''', '''latents''', '''return_dict''', '''callback''', '''callback_steps''', ] ) def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: torch.manual_seed(0 ) a_ : Optional[int] = UNetaDConditionModel( block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=3_2 , attention_head_dim=4 , ) a_ : int = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=SCREAMING_SNAKE_CASE__ , set_alpha_to_one=SCREAMING_SNAKE_CASE__ , ) torch.manual_seed(0 ) a_ : int = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) a_ : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , ) a_ : Dict = CLIPTextModel(SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) a_ : Union[str, Any] = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, } return components def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any]=0 ) -> List[str]: if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ): a_ : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) else: a_ : Optional[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) a_ : int = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'pt', } return inputs def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: a_ : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator a_ : Dict = self.get_dummy_components() a_ : str = TextToVideoSDPipeline(**SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = sd_pipe.to(SCREAMING_SNAKE_CASE__ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Tuple = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) a_ : Dict = 'np' a_ : Dict = sd_pipe(**SCREAMING_SNAKE_CASE__ ).frames a_ : int = 
frames[0][-3:, -3:, -1] assert frames[0].shape == (6_4, 6_4, 3) a_ : Union[str, Any] = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: self._test_attention_slicing_forward_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE__ , expected_max_diff=3E-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def SCREAMING_SNAKE_CASE ( self : Any ) -> str: self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE__ , expected_max_diff=1E-2 ) @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: pass @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]: pass @unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' ) def SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]: pass def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: return super().test_progress_bar() @slow @skip_mps class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: a_ : str = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy' ) a_ : Any = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' ) a_ : Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) a_ : Optional[Any] = pipe.to('cuda' ) a_ : Any = 'Spiderman is surfing' a_ : List[Any] = torch.Generator(device='cpu' ).manual_seed(0 ) a_ : Optional[Any] = pipe(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2_5 , output_type='pt' ).frames a_ : str = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5E-2 def SCREAMING_SNAKE_CASE ( self : Any ) -> Any: a_ : Dict = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy' ) a_ : Tuple = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' ) a_ : Tuple = pipe.to('cuda' ) a_ : Any = 'Spiderman is surfing' a_ : List[str] = torch.Generator(device='cpu' ).manual_seed(0 ) a_ : List[Any] = pipe(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type='pt' ).frames a_ : List[str] = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5E-2
32
0
import logging

from transformers.configuration_utils import PretrainedConfig


logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """A BERT-style configuration with extra pruning/masking attributes ("masked_bert")."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
345
'''simple docstring''' import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed UpperCamelCase : int = """true""" def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : int=82 , snake_case : Tuple=16 ) -> Union[str, Any]: """simple docstring""" set_seed(42 ) a : List[str] = RegressionModel() a : Union[str, Any] = deepcopy(snake_case ) a : Dict = RegressionDataset(length=snake_case ) a : Dict = DataLoader(snake_case , batch_size=snake_case ) model.to(accelerator.device ) a , a : Optional[int] = accelerator.prepare(snake_case , snake_case ) return model, ddp_model, dataloader def SCREAMING_SNAKE_CASE__ ( snake_case : Accelerator , snake_case : Union[str, Any]=False ) -> Optional[int]: """simple docstring""" a : List[str] = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' ) a : Any = load_dataset('glue' , 'mrpc' , split='validation' ) def tokenize_function(snake_case : int ): a : Any = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=snake_case , max_length=snake_case ) return outputs with accelerator.main_process_first(): a : Dict = dataset.map( snake_case , batched=snake_case , remove_columns=['idx', 'sentence1', 'sentence2'] , ) a : List[str] = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(snake_case : Optional[Any] ): if use_longest: return tokenizer.pad(snake_case , padding='longest' , return_tensors='pt' ) return tokenizer.pad(snake_case , padding='max_length' , max_length=128 , return_tensors='pt' ) return DataLoader(snake_case , shuffle=snake_case , collate_fn=snake_case , batch_size=16 ) def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" a : int = Accelerator(dispatch_batches=snake_case , split_batches=snake_case ) a : List[str] = get_dataloader(snake_case , not dispatch_batches ) a : Optional[Any] = AutoModelForSequenceClassification.from_pretrained( 'hf-internal-testing/mrpc-bert-base-cased' , return_dict=snake_case ) a , a : Optional[Any] = accelerator.prepare(snake_case , snake_case ) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : Union[str, Any] ) -> Optional[int]: """simple docstring""" a : Dict = [] for batch in dataloader: a , a : Any = batch.values() with torch.no_grad(): a : Tuple = model(snake_case ) a , a : Dict = accelerator.gather_for_metrics((logit, target) ) logits_and_targets.append((logit, target) ) a , a : List[str] = [], [] for logit, targ in logits_and_targets: logits.append(snake_case ) targs.append(snake_case ) a , a : Any = torch.cat(snake_case ), torch.cat(snake_case ) return logits, targs def SCREAMING_SNAKE_CASE__ ( snake_case : Accelerator , snake_case : Dict=82 , snake_case : str=False , snake_case : List[str]=False , snake_case : List[Any]=16 ) -> Optional[int]: """simple docstring""" a , a , a : int = get_basic_setup(snake_case , snake_case , snake_case ) a , a : int = generate_predictions(snake_case , snake_case , snake_case ) assert ( len(snake_case ) == num_samples ), 
F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(snake_case )}""" def SCREAMING_SNAKE_CASE__ ( snake_case : bool = False , snake_case : bool = False ) -> List[str]: """simple docstring""" a : int = evaluate.load('glue' , 'mrpc' ) a , a : Tuple = get_mrpc_setup(snake_case , snake_case ) # First do baseline a , a , a : Tuple = setup['no'] model.to(snake_case ) model.eval() for batch in dataloader: batch.to(snake_case ) with torch.inference_mode(): a : List[Any] = model(**snake_case ) a : Optional[Any] = outputs.logits.argmax(dim=-1 ) metric.add_batch(predictions=snake_case , references=batch['labels'] ) a : Tuple = metric.compute() # Then do distributed a , a , a : Tuple = setup['ddp'] model.eval() for batch in dataloader: with torch.inference_mode(): a : List[str] = model(**snake_case ) a : Optional[Any] = outputs.logits.argmax(dim=-1 ) a : Optional[int] = batch['labels'] a , a : Optional[int] = accelerator.gather_for_metrics((preds, references) ) metric.add_batch(predictions=snake_case , references=snake_case ) a : str = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] , distributed[key] ), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n""" def SCREAMING_SNAKE_CASE__ ( ) -> str: """simple docstring""" a : Dict = Accelerator(split_batches=snake_case , dispatch_batches=snake_case ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print('**Testing gather_for_metrics**' ) for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" ) test_mrpc(snake_case , snake_case ) accelerator.state._reset_state() if accelerator.is_local_main_process: print('**Test torch metrics**' ) for split_batches in [True, False]: for dispatch_batches in [True, False]: a : List[Any] = Accelerator(split_batches=snake_case , dispatch_batches=snake_case ) if accelerator.is_local_main_process: print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" ) test_torch_metrics(snake_case , 99 ) accelerator.state._reset_state() if accelerator.is_local_main_process: print('**Test last batch is not dropped when perfectly divisible**' ) a : Optional[Any] = Accelerator() test_torch_metrics(snake_case , 512 ) accelerator.state._reset_state() def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] ) -> int: """simple docstring""" # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
345
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class DonutSwinConfig(PretrainedConfig):
    """Configuration for the Donut Swin Transformer encoder ("donut-swin")."""

    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
175
from maths.prime_check import is_prime


def twin_prime(number: int) -> int:
    """Return number + 2 if (number, number + 2) is a twin prime pair, otherwise -1."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
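# Example (hypothetical usage, assuming maths.prime_check.is_prime behaves as named):
# twin_prime(3) returns 5 because (3, 5) is a twin prime pair, while twin_prime(4)
# returns -1 because 4 is not prime.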
175
1
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of `function` in [a, b] by repeated interval halving."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of a or b is a root of the function
        return a
    elif function(b) == 0:
        return b
    elif function(a) * function(b) > 0:
        # if neither endpoint is a root and both values have the same sign,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precise to within 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
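# Note (added context): f(x) = x**3 - 2*x - 5 has a single real root near 2.0945515,
# so the demo call bisection(f, 1, 1000) should print approximately that value.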
354
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_batched,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class _snake_case(BaseImageProcessor):
    """Image processor that optionally resizes, center-crops, rescales and normalizes images."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Dict[str, int] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)

        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
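A rough usage sketch for the image processor above; `_snake_case` is simply the class name this file uses, and the exact accepted input layouts depend on the installed transformers version:

# Illustrative only: preprocess a single random HWC uint8 image.
import numpy as np

processor = _snake_case()
image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
batch = processor.preprocess(image, return_tensors="np")
print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)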
59
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __A = { "configuration_conditional_detr": [ "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConditionalDetrConfig", "ConditionalDetrOnnxConfig", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["ConditionalDetrFeatureExtractor"] __A = ["ConditionalDetrImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST", "ConditionalDetrForObjectDetection", "ConditionalDetrForSegmentation", "ConditionalDetrModel", "ConditionalDetrPreTrainedModel", ] if TYPE_CHECKING: from .configuration_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, ConditionalDetrConfig, ConditionalDetrOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor from .image_processing_conditional_detr import ConditionalDetrImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel, ConditionalDetrPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
177
"""simple docstring""" from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { "snap-research/efficientformer-l1-300": ( "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json" ), } class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Optional[Any] = "efficientformer" def __init__( self , _UpperCAmelCase = [3, 2, 6, 4] , _UpperCAmelCase = [48, 96, 224, 448] , _UpperCAmelCase = [True, True, True, True] , _UpperCAmelCase = 448 , _UpperCAmelCase = 32 , _UpperCAmelCase = 4 , _UpperCAmelCase = 7 , _UpperCAmelCase = 5 , _UpperCAmelCase = 8 , _UpperCAmelCase = 4 , _UpperCAmelCase = 0.0 , _UpperCAmelCase = 16 , _UpperCAmelCase = 3 , _UpperCAmelCase = 3 , _UpperCAmelCase = 3 , _UpperCAmelCase = 2 , _UpperCAmelCase = 1 , _UpperCAmelCase = 0.0 , _UpperCAmelCase = 1 , _UpperCAmelCase = True , _UpperCAmelCase = True , _UpperCAmelCase = 1e-5 , _UpperCAmelCase = "gelu" , _UpperCAmelCase = 0.02 , _UpperCAmelCase = 1e-1_2 , _UpperCAmelCase = 224 , _UpperCAmelCase = 1e-0_5 , **_UpperCAmelCase , ): super().__init__(**_UpperCAmelCase ) lowercase__: List[str] = hidden_act lowercase__: Union[str, Any] = hidden_dropout_prob lowercase__: Union[str, Any] = hidden_sizes lowercase__: Any = num_hidden_layers lowercase__: int = num_attention_heads lowercase__: Dict = initializer_range lowercase__: Optional[int] = layer_norm_eps lowercase__: int = patch_size lowercase__: int = num_channels lowercase__: str = depths lowercase__: List[str] = mlp_expansion_ratio lowercase__: List[str] = downsamples lowercase__: List[str] = dim lowercase__: Optional[Any] = key_dim lowercase__: Union[str, Any] = attention_ratio lowercase__: Any = resolution lowercase__: Any = pool_size lowercase__: List[Any] = downsample_patch_size lowercase__: Optional[int] = downsample_stride lowercase__: Union[str, Any] = downsample_pad lowercase__: List[Any] = drop_path_rate lowercase__: Optional[Any] = num_metaad_blocks lowercase__: Any = distillation lowercase__: Optional[int] = use_layer_scale lowercase__: List[str] = layer_scale_init_value lowercase__: Dict = image_size lowercase__: List[str] = batch_norm_eps
177
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}


class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
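A short hedged sketch of instantiating the config above (values are illustrative):

# Illustrative: a 4x upscaling variant of the default configuration.
config = Swin2SRConfig(upscale=4)
print(config.num_layers)   # 6, derived from len(depths)
print(config.hidden_size)  # 180, routed to embed_dim via attribute_map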
363
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer


class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )

            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)

            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # For backwards compatibility, delegate to the current processor when
        # used inside the (deprecated) target context manager.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        # For backwards compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
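A hedged usage sketch for the processor above (the checkpoint name is only an example and downloading it requires network access):

# Illustrative only: encode audio and text labels in a single call.
import numpy as np
from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
speech = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
batch = processor(audio=speech, sampling_rate=16000, text="HELLO WORLD", return_tensors="pt")
print(batch["input_values"].shape)  # torch.Size([1, 16000])
print(batch["labels"].shape)        # tokenized transcription ids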
199
0
from __future__ import annotations

# Sieve of Eratosthenes up to one million.
sieve = [True] * 1000001
i = 2
while i * i <= 1000000:
    if sieve[i]:
        for j in range(i * i, 1000001, i):
            sieve[j] = False
    i += 1


def is_prime(n: int) -> bool:
    return sieve[n]


def contains_an_even_digit(n: int) -> bool:
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1000000) -> list[int]:
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def solution() -> int:
    return len(find_circular_primes())


if __name__ == "__main__":
    print(f"{len(find_circular_primes()) = }")
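A quick sanity check for the helpers above; the expected output is the well-known list of circular primes below 100:

print(find_circular_primes(100))
# [2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79, 97]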
143
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
143
1
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"


class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
119
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]


if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
119
1
from typing import Dict, List, Optional, Union

import numpy as np

from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy


logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    """General-purpose feature extraction class for sequence inputs such as speech."""

    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
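A minimal, hedged sketch of the padding behavior above. `model_input_names` is normally supplied by a concrete subclass, so it is set on the instance here purely for illustration:

# Illustrative only: pad two variable-length 1-D feature sequences.
extractor = SequenceFeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
extractor.model_input_names = ["input_values"]  # assumption: usually a subclass attribute
batch = extractor.pad({"input_values": [[0.1, 0.2, 0.3], [0.4, 0.5]]}, padding=True, return_tensors="np")
print(batch["input_values"])    # second row padded with 0.0 to length 3
print(batch["attention_mask"])  # 1 for real frames, 0 for padding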
299
def sum_of_divisors(input_num: int) -> int:
    """Return the sum of all proper divisors of a positive integer."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
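A few worked checks for the divisor sum above; a number is "perfect" exactly when the sum of its proper divisors equals the number itself:

assert sum_of_divisors(6) == 6    # 1 + 2 + 3
assert sum_of_divisors(28) == 28  # 1 + 2 + 4 + 7 + 14
assert sum_of_divisors(12) == 16  # 1 + 2 + 3 + 4 + 6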
299
1
from ..utils import DummyObject, requires_backends


class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
15
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer

import datasets
from datasets import logging


_CITATION = """\
"""

_DESCRIPTION = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.

For more information, see https://huggingface.co/docs/transformers/perplexity
"""

_KWARGS_DESCRIPTION = """
Args:
    model_id (str): model used for calculating Perplexity
        NOTE: Perplexity can only be calculated for causal language models.
        This includes models such as gpt2, causal variations of bert,
        causal versions of t5, and more (the full list can be found
        in the AutoModelForCausalLM documentation here:
        https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )

    input_texts (list of str): input text, each separate text snippet is one list entry.
    batch_size (int): the batch size to run texts through the model. Defaults to 16.
    add_start_token (bool): whether to add the start token to the texts,
        so the perplexity can include the probability of the first word. Defaults to True.
    device (str): device to run on, defaults to 'cuda' when available
Returns:
    perplexity: dictionary containing the perplexity scores for the texts
        in the input list, as well as the mean perplexity. If one of the input texts is
        longer than the max input length of the model, then it is truncated to the
        max length for the perplexity computation.
Examples:
    Example 1:
        >>> perplexity = datasets.load_metric("perplexity")
        >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
        >>> results = perplexity.compute(model_id='gpt2',
        ...                              add_start_token=False,
        ...                              input_texts=input_texts) # doctest:+ELLIPSIS
        >>> print(list(results.keys()))
        ['perplexities', 'mean_perplexity']
        >>> print(round(results["mean_perplexity"], 2))
        78.22
        >>> print(round(results["perplexities"][0], 2))
        11.11

    Example 2:
        >>> perplexity = datasets.load_metric("perplexity")
        >>> input_texts = datasets.load_dataset("wikitext",
        ...                                     "wikitext-2-raw-v1",
        ...                                     split="test")["text"][:50] # doctest:+ELLIPSIS
        [...]
        >>> input_texts = [s for s in input_texts if s!='']
        >>> results = perplexity.compute(model_id='gpt2',
        ...                              input_texts=input_texts) # doctest:+ELLIPSIS
        >>> print(list(results.keys()))
        ['perplexities', 'mean_perplexity']
        >>> print(round(results["mean_perplexity"], 2))
        60.35
        >>> print(round(results["perplexities"][0], 2))
        81.12
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
15
1
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch

from transformers.testing_utils import TestCasePlus, require_torch_tpu


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
340
__version__ = "0.21.0"

from .accelerator import Accelerator
from .big_modeling import (
    cpu_offload,
    cpu_offload_with_hook,
    disk_offload,
    dispatch_model,
    init_empty_weights,
    init_on_device,
    load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
    DeepSpeedPlugin,
    DistributedDataParallelKwargs,
    DistributedType,
    FullyShardedDataParallelPlugin,
    GradScalerKwargs,
    InitProcessGroupKwargs,
    find_executable_batch_size,
    infer_auto_device_map,
    is_rich_available,
    load_checkpoint_in_model,
    synchronize_rng_states,
)


if is_rich_available():
    from .utils import rich
340
1
import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class UpperCamelCase__ ( __lowercase ,unittest.TestCase ): _SCREAMING_SNAKE_CASE : int = BarthezTokenizer _SCREAMING_SNAKE_CASE : List[str] = BarthezTokenizerFast _SCREAMING_SNAKE_CASE : List[str] = True _SCREAMING_SNAKE_CASE : Dict = True def lowerCAmelCase (self : Dict ): super().setUp() __a : Union[str, Any] = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''' ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=snake_case_ ) __a : Any = tokenizer def lowerCAmelCase (self : Dict ): __a : Optional[int] = '''<pad>''' __a : Any = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ ) def lowerCAmelCase (self : Optional[int] ): __a : List[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''<mask>''' ) self.assertEqual(len(snake_case_ ) , 1_0_1_1_2_2 ) def lowerCAmelCase (self : Optional[Any] ): self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_1_2_2 ) @require_torch def lowerCAmelCase (self : List[str] ): __a : List[str] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] __a : Optional[Any] = [0, 5_7, 3_0_1_8, 7_0_3_0_7, 9_1, 2] __a : Optional[int] = self.tokenizer( snake_case_ , max_length=len(snake_case_ ) , padding=snake_case_ , truncation=snake_case_ , return_tensors='''pt''' ) self.assertIsInstance(snake_case_ , snake_case_ ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , batch.attention_mask.shape ) __a : str = batch.input_ids.tolist()[0] self.assertListEqual(snake_case_ , snake_case_ ) def lowerCAmelCase (self : Dict ): if not self.test_rust_tokenizer: return __a : Dict = self.get_tokenizer() __a : List[str] = self.get_rust_tokenizer() __a : Tuple = '''I was born in 92000, and this is falsé.''' __a : Optional[int] = tokenizer.tokenize(snake_case_ ) __a : Optional[int] = rust_tokenizer.tokenize(snake_case_ ) self.assertListEqual(snake_case_ , snake_case_ ) __a : str = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) __a : Any = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) self.assertListEqual(snake_case_ , snake_case_ ) __a : Any = self.get_rust_tokenizer() __a : str = tokenizer.encode(snake_case_ ) __a : List[str] = rust_tokenizer.encode(snake_case_ ) self.assertListEqual(snake_case_ , snake_case_ ) @slow def lowerCAmelCase (self : Optional[int] ): # fmt: off __a : Optional[int] = {'''input_ids''': [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 
4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. __a : Optional[int] = [ '''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ''' '''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''', '''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ''' '''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ''' '''telles que la traduction et la synthèse de texte.''', ] self.tokenizer_integration_test_util( expected_encoding=snake_case_ , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=snake_case_ , )
371
def merge_sort(collection: list) -> list:
    """Sort a list with the classic divide-and-conquer merge sort."""

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into a single sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
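A quick sanity check against Python's built-in sort (random input chosen purely for illustration):

import random

sample = [random.randint(-100, 100) for _ in range(50)]
assert merge_sort(sample) == sorted(sample)
print(merge_sort([5, 3, 8, 1]))  # [1, 3, 5, 8]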
90
0
"""simple docstring""" import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __UpperCAmelCase = get_tests_dir('fixtures/spiece.model') @require_sentencepiece @require_tokenizers class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): UpperCAmelCase_ :Any = DebertaVaTokenizer UpperCAmelCase_ :int = DebertaVaTokenizerFast UpperCAmelCase_ :Optional[Any] = True UpperCAmelCase_ :List[Any] = True def __lowerCAmelCase ( self ) -> List[str]: super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase_ :List[Any] = DebertaVaTokenizer(__A , unk_token="""<unk>""" ) tokenizer.save_pretrained(self.tmpdirname ) def __lowerCAmelCase ( self , __A ) -> int: lowerCAmelCase_ :List[str] = """this is a test""" lowerCAmelCase_ :Union[str, Any] = """this is a test""" return input_text, output_text def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :Dict = """<pad>""" lowerCAmelCase_ :Dict = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__A ) , __A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__A ) , __A ) def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :Any = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<pad>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """[PAD]""" ) self.assertEqual(len(__A ) , 3_0001 ) def __lowerCAmelCase ( self ) -> str: self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 ) def __lowerCAmelCase ( self ) -> str: # fmt: off lowerCAmelCase_ :Union[str, Any] = """ \tHeLLo!how \n Are yoU? """ lowerCAmelCase_ :int = ["""▁hello""", """!""", """how""", """▁are""", """▁you""", """?"""] # fmt: on lowerCAmelCase_ :List[str] = DebertaVaTokenizer(__A , do_lower_case=__A ) lowerCAmelCase_ :Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(__A , add_special_tokens=__A ) ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :Union[str, Any] = DebertaVaTokenizerFast(__A , do_lower_case=__A ) lowerCAmelCase_ :List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__A , add_special_tokens=__A ) ) self.assertListEqual(__A , __A ) @unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" ) def __lowerCAmelCase ( self ) -> Any: pass @unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" ) def __lowerCAmelCase ( self ) -> int: pass def __lowerCAmelCase ( self ) -> Dict: # fmt: off lowerCAmelCase_ :List[str] = """I was born in 92000, and this is falsé.""" lowerCAmelCase_ :int = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ] # fmt: on lowerCAmelCase_ :str = DebertaVaTokenizer(__A , split_by_punct=__A ) lowerCAmelCase_ :str = tokenizer.convert_ids_to_tokens(tokenizer.encode(__A , add_special_tokens=__A ) ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :Any = DebertaVaTokenizerFast(__A , split_by_punct=__A ) lowerCAmelCase_ :int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__A , add_special_tokens=__A ) ) self.assertListEqual(__A , __A ) def __lowerCAmelCase ( self ) -> Any: # fmt: off lowerCAmelCase_ :Any = """I was born in 92000, and this is falsé.""" lowerCAmelCase_ :Tuple = ["""▁i""", """▁was""", 
"""▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ] # fmt: on lowerCAmelCase_ :int = DebertaVaTokenizer(__A , do_lower_case=__A , split_by_punct=__A ) lowerCAmelCase_ :List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__A , add_special_tokens=__A ) ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :Union[str, Any] = DebertaVaTokenizerFast(__A , do_lower_case=__A , split_by_punct=__A ) lowerCAmelCase_ :List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__A , add_special_tokens=__A ) ) self.assertListEqual(__A , __A ) def __lowerCAmelCase ( self ) -> Union[str, Any]: # fmt: off lowerCAmelCase_ :int = """I was born in 92000, and this is falsé.""" lowerCAmelCase_ :List[Any] = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ] # fmt: on lowerCAmelCase_ :Tuple = DebertaVaTokenizer(__A , do_lower_case=__A , split_by_punct=__A ) lowerCAmelCase_ :List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__A , add_special_tokens=__A ) ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :Dict = DebertaVaTokenizerFast(__A , do_lower_case=__A , split_by_punct=__A ) lowerCAmelCase_ :Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__A , add_special_tokens=__A ) ) self.assertListEqual(__A , __A ) def __lowerCAmelCase ( self ) -> Any: # fmt: off lowerCAmelCase_ :Any = """I was born in 92000, and this is falsé.""" lowerCAmelCase_ :int = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ] # fmt: on lowerCAmelCase_ :List[Any] = DebertaVaTokenizer(__A , do_lower_case=__A , split_by_punct=__A ) lowerCAmelCase_ :Optional[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__A , add_special_tokens=__A ) ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :Optional[int] = DebertaVaTokenizerFast(__A , do_lower_case=__A , split_by_punct=__A ) lowerCAmelCase_ :List[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__A , add_special_tokens=__A ) ) self.assertListEqual(__A , __A ) def __lowerCAmelCase ( self ) -> str: # fmt: off lowerCAmelCase_ :Optional[int] = """ \tHeLLo!how \n Are yoU? 
""" lowerCAmelCase_ :List[Any] = ["""▁""", """<unk>""", """e""", """<unk>""", """o""", """!""", """how""", """▁""", """<unk>""", """re""", """▁yo""", """<unk>""", """?"""] # fmt: on lowerCAmelCase_ :Union[str, Any] = DebertaVaTokenizer(__A , do_lower_case=__A , split_by_punct=__A ) lowerCAmelCase_ :List[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__A , add_special_tokens=__A ) ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :str = DebertaVaTokenizerFast(__A , do_lower_case=__A , split_by_punct=__A ) lowerCAmelCase_ :Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__A , add_special_tokens=__A ) ) self.assertListEqual(__A , __A ) def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :Tuple = self.get_tokenizer() lowerCAmelCase_ :str = self.get_rust_tokenizer() lowerCAmelCase_ :List[str] = """I was born in 92000, and this is falsé.""" lowerCAmelCase_ :Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(__A , add_special_tokens=__A ) ) lowerCAmelCase_ :Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__A , add_special_tokens=__A ) ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :str = tokenizer.encode(__A , add_special_tokens=__A ) lowerCAmelCase_ :int = rust_tokenizer.encode(__A , add_special_tokens=__A ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :List[str] = self.get_rust_tokenizer() lowerCAmelCase_ :Dict = tokenizer.encode(__A ) lowerCAmelCase_ :Dict = rust_tokenizer.encode(__A ) self.assertListEqual(__A , __A ) def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :str = """This is a test""" lowerCAmelCase_ :int = [13, 1, 4398, 25, 21, 1289] lowerCAmelCase_ :Optional[Any] = ["""▁""", """T""", """his""", """▁is""", """▁a""", """▁test"""] lowerCAmelCase_ :str = ["""▁""", """<unk>""", """his""", """▁is""", """▁a""", """▁test"""] lowerCAmelCase_ :Any = DebertaVaTokenizer(__A , keep_accents=__A ) lowerCAmelCase_ :Optional[int] = DebertaVaTokenizerFast(__A , keep_accents=__A ) lowerCAmelCase_ :Union[str, Any] = tokenizer.encode(__A , add_special_tokens=__A ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :List[Any] = tokenizer.tokenize(__A ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :Tuple = tokenizer.convert_ids_to_tokens(__A ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :int = rust_tokenizer.encode(__A , add_special_tokens=__A ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :Dict = rust_tokenizer.tokenize(__A ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :List[str] = rust_tokenizer.convert_ids_to_tokens(__A ) self.assertListEqual(__A , __A ) # fmt: off lowerCAmelCase_ :Tuple = """I was born in 92000, and this is falsé.""" lowerCAmelCase_ :Optional[Any] = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] lowerCAmelCase_ :List[Any] = ["""▁""", """I""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """.""", ] lowerCAmelCase_ :Optional[Any] = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ] # fmt: on lowerCAmelCase_ :int = tokenizer.encode(__A , add_special_tokens=__A ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :Any = tokenizer.tokenize(__A ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :List[Any] = tokenizer.convert_ids_to_tokens(__A ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :str = rust_tokenizer.encode(__A , add_special_tokens=__A ) 
self.assertListEqual(__A , __A ) lowerCAmelCase_ :Optional[Any] = rust_tokenizer.tokenize(__A ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :str = rust_tokenizer.convert_ids_to_tokens(__A ) self.assertListEqual(__A , __A ) def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ :Optional[int] = DebertaVaTokenizer(__A ) lowerCAmelCase_ :List[Any] = tokenizer.encode("""sequence builders""" ) lowerCAmelCase_ :Dict = tokenizer.encode("""multi-sequence build""" ) lowerCAmelCase_ :int = tokenizer.build_inputs_with_special_tokens(__A ) lowerCAmelCase_ :Optional[Any] = tokenizer.build_inputs_with_special_tokens(__A , __A ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , __A ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , __A , ) @slow def __lowerCAmelCase ( self ) -> Tuple: # fmt: off lowerCAmelCase_ :List[Any] = {"""input_ids""": [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__A , model_name="""microsoft/deberta-v2-xlarge""" , revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""" , )
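The assertions above pin down DeBERTa-v2's special-token layout. A minimal self-contained sketch of that layout (a hypothetical helper, not the real tokenizer method; ids 1 and 2 are assumed to be [CLS] and [SEP], matching the expected encoding above):

CLS_ID, SEP_ID = 1, 2  # assumed ids for [CLS] and [SEP]

def build_inputs_with_special_tokens(text_ids, pair_ids=None):
    # single sequence: [CLS] A [SEP]; sequence pair: [CLS] A [SEP] B [SEP]
    out = [CLS_ID] + list(text_ids) + [SEP_ID]
    if pair_ids is not None:
        out += list(pair_ids) + [SEP_ID]
    return out

assert build_inputs_with_special_tokens([7, 8]) == [1, 7, 8, 2]
assert build_inputs_with_special_tokens([7, 8], [9]) == [1, 7, 8, 2, 9, 2]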
84
import unittest

import torch

from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow


torch.backends.cuda.matmul.allow_tf32 = False


class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1_000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1_000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1_000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
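The equality being tested holds because both schedulers implement the same forward-diffusion step. A minimal NumPy sketch of that `add_noise` rule (illustrative only, not the diffusers implementation):

import numpy as np


def add_noise(x0, eps, t, betas):
    # x_t = sqrt(alpha_bar_t) * x0 + sqrt(1 - alpha_bar_t) * eps
    alphas_bar = np.cumprod(1.0 - betas)
    return np.sqrt(alphas_bar[t]) * x0 + np.sqrt(1.0 - alphas_bar[t]) * eps


betas = np.linspace(1e-4, 0.02, 1000)  # the linear schedule used in the test
x0, eps = np.ones(4), np.random.default_rng(0).normal(size=4)
print(add_noise(x0, eps, t=500, betas=betas))  # drifts from x0 toward noise as t grows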
282
0
import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def _snake_case ( UpperCamelCase : Union[str, Any]=32 , UpperCamelCase : List[Any]=10 , UpperCamelCase : Optional[int]=100 , UpperCamelCase : Tuple=1026 , UpperCamelCase : Optional[int]=True , UpperCamelCase : List[Any]="data/tokenized_stories_train_wikitext103.jbl" , UpperCamelCase : Union[str, Any]="igf_context_pairs.jbl" , ): set_seed(3 ) # generate train_data and objective_set UpperCAmelCase , UpperCAmelCase : Dict = generate_datasets( _lowerCAmelCase , _lowerCAmelCase , number=_lowerCAmelCase , min_len=1026 , trim=_lowerCAmelCase ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? UpperCAmelCase : List[str] = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) # load pretrained model UpperCAmelCase : Dict = load_gpta("""gpt2""" ).to(_lowerCAmelCase ) print("""computing perplexity on objective set""" ) UpperCAmelCase : Optional[Any] = compute_perplexity(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).item() print("""perplexity on objective set:""" , _lowerCAmelCase ) # collect igf pairs and save to file demo.jbl collect_objective_set(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def _snake_case ( UpperCamelCase : int , UpperCamelCase : Optional[int]=15 , UpperCamelCase : List[Any]=128 , UpperCamelCase : List[Any]=100 , UpperCamelCase : List[Any]="igf_model.pt" , ): set_seed(42 ) # Load pre-trained model UpperCAmelCase : Optional[Any] = GPTaLMHeadModel.from_pretrained("""gpt2""" ) # Initialize secondary learner to use embedding weights of model UpperCAmelCase : Union[str, Any] = SecondaryLearner(_lowerCAmelCase ) # Train secondary learner UpperCAmelCase : Union[str, Any] = train_secondary_learner( _lowerCAmelCase , _lowerCAmelCase , max_epochs=_lowerCAmelCase , batch_size=_lowerCAmelCase , eval_freq=100 , igf_model_path=_lowerCAmelCase , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def _snake_case ( UpperCamelCase : Any , UpperCamelCase : Any , UpperCamelCase : int , UpperCamelCase : List[str]=32 , UpperCamelCase : str=1000 , UpperCamelCase : Dict=16 , UpperCamelCase : Union[str, Any]=1.0 , UpperCamelCase : int=recopy_gpta , UpperCamelCase : Dict=None , UpperCamelCase : List[Any]=10 , UpperCamelCase : Optional[int]="gpt2_finetuned.pt" , ): UpperCAmelCase : List[str] = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) UpperCAmelCase : Tuple = RandomSampler(_lowerCAmelCase ) UpperCAmelCase : Optional[int] = DataLoader(_lowerCAmelCase , sampler=_lowerCAmelCase ) UpperCAmelCase : Union[str, Any] = max_steps // (len(_lowerCAmelCase )) + 1 UpperCAmelCase : Optional[Any] = 0 UpperCAmelCase : List[Any] = torch.zeros((1, context_len) , dtype=torch.long , device=_lowerCAmelCase ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = recopy_model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) model.train() if 
secondary_learner is not None: secondary_learner.to(_lowerCAmelCase ) secondary_learner.eval() UpperCAmelCase : Optional[Any] = [] UpperCAmelCase : Dict = 0 UpperCAmelCase : List[str] = [] UpperCAmelCase : List[str] = [] # Compute the performance of the transformer model at the beginning UpperCAmelCase : List[Any] = compute_perplexity(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) test_perps.append(_lowerCAmelCase ) print("""Test perplexity, step""" , _lowerCAmelCase , """:""" , _lowerCAmelCase ) for epoch in range(int(_lowerCAmelCase ) ): for step, example in enumerate(_lowerCAmelCase ): torch.cuda.empty_cache() UpperCAmelCase : Optional[Any] = random.randint(0 , example.size(2 ) - context_len - 1 ) UpperCAmelCase : Any = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() UpperCAmelCase : Tuple = model(_lowerCAmelCase , labels=_lowerCAmelCase ) UpperCAmelCase : int = True if secondary_learner is not None: UpperCAmelCase : Dict = secondary_learner.forward( torch.tensor(_lowerCAmelCase , dtype=torch.long , device=_lowerCAmelCase ).unsqueeze(0 ) )[0].item() observed_qs.append(float(_lowerCAmelCase ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 10: UpperCAmelCase : Optional[Any] = -1 if predicted_q < threshold: UpperCAmelCase : List[str] = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) UpperCAmelCase : Any = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. if examples == batch_size: torch.cuda.empty_cache() UpperCAmelCase : Any = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: UpperCAmelCase : Optional[Any] = compute_perplexity(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) test_perps.append(_lowerCAmelCase ) print("""Test perplexity, step""" , _lowerCAmelCase , """:""" , _lowerCAmelCase ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , _lowerCAmelCase ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def _snake_case ( ): UpperCAmelCase : List[Any] = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" ) # Required parameters parser.add_argument( """--data_dir""" , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help="""The input data dir. 
Should contain data files for WikiText.""" , ) parser.add_argument( """--model_name_or_path""" , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help="""Path to pretrained model or model identifier from huggingface.co/models""" , ) parser.add_argument( """--data_file""" , type=_lowerCAmelCase , default=_lowerCAmelCase , help=( """A jbl file containing tokenized data which can be split as objective dataset, """ """train_dataset and test_dataset.""" ) , ) parser.add_argument( """--igf_data_file""" , type=_lowerCAmelCase , default=_lowerCAmelCase , help="""A jbl file containing the context and information gain pairs to train secondary learner.""" , ) parser.add_argument( """--output_dir""" , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help="""The output directory where the final fine-tuned model is stored.""" , ) parser.add_argument( """--tokenizer_name""" , default=_lowerCAmelCase , type=_lowerCAmelCase , help="""Pretrained tokenizer name or path if not the same as model_name""" , ) parser.add_argument("""--seed""" , type=_lowerCAmelCase , default=_lowerCAmelCase , help="""A seed for reproducible training.""" ) parser.add_argument( """--context_len""" , default=32 , type=_lowerCAmelCase , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--size_objective_set""" , default=100 , type=_lowerCAmelCase , help="""number of articles that are long enough to be used as our objective set""" , ) parser.add_argument( """--eval_freq""" , default=100 , type=_lowerCAmelCase , help="""secondary model evaluation is triggered at eval_freq""" ) parser.add_argument("""--max_steps""" , default=1000 , type=_lowerCAmelCase , help="""To calculate training epochs""" ) parser.add_argument( """--secondary_learner_batch_size""" , default=128 , type=_lowerCAmelCase , help="""batch size of training data for secondary learner""" , ) parser.add_argument( """--batch_size""" , default=16 , type=_lowerCAmelCase , help="""batch size of training data of language model(gpt2) """ ) parser.add_argument( """--eval_interval""" , default=10 , type=_lowerCAmelCase , help=( """decay the selectivity of our secondary learner filter from""" """1 standard deviation above average to 1 below average after 10 batches""" ) , ) parser.add_argument( """--number""" , default=100 , type=_lowerCAmelCase , help="""The number of examples split to be used as objective_set/test_data""" ) parser.add_argument( """--min_len""" , default=1026 , type=_lowerCAmelCase , help="""The minimum length of the article to be used as objective set""" ) parser.add_argument( """--secondary_learner_max_epochs""" , default=15 , type=_lowerCAmelCase , help="""number of epochs to train secondary learner""" ) parser.add_argument("""--trim""" , default=_lowerCAmelCase , type=_lowerCAmelCase , help="""truncate the example if it exceeds context length""" ) parser.add_argument( """--threshold""" , default=1.0 , type=_lowerCAmelCase , help=( """The threshold value used by secondary learner to filter the train_data and allow only""" """ informative data as input to the model""" ) , ) parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=_lowerCAmelCase , help="""finetuned_model_name""" ) parser.add_argument( """--recopy_model""" , default=_lowerCAmelCase , type=_lowerCAmelCase , help="""Reset the model to the original pretrained GPT-2 weights after 
each iteration""" , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=_lowerCAmelCase , data_file="""data/tokenized_stories_train_wikitext103.jbl""" , igf_data_file="""igf_context_pairs.jbl""" , ) # Load train data for secondary learner UpperCAmelCase : List[str] = joblib.load("""data/IGF_values.jbl""" ) # Train secondary learner UpperCAmelCase : Optional[Any] = training_secondary_learner( _lowerCAmelCase , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="""igf_model.pt""" , ) # load pretrained gpt2 model UpperCAmelCase : Optional[Any] = GPTaLMHeadModel.from_pretrained("""gpt2""" ) set_seed(42 ) # Generate train and test data to train and evaluate gpt2 model UpperCAmelCase , UpperCAmelCase : Optional[int] = generate_datasets( context_len=32 , file="""data/tokenized_stories_train_wikitext103.jbl""" , number=100 , min_len=1026 , trim=_lowerCAmelCase ) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=_lowerCAmelCase , secondary_learner=_lowerCAmelCase , eval_interval=10 , finetuned_model_name="""gpt2_finetuned.pt""" , ) if __name__ == "__main__": main()
354
"""simple docstring""" from typing import List from .keymap import KEYMAP, get_character def _snake_case ( UpperCamelCase : str ): def decorator(UpperCamelCase : Optional[int] ): UpperCAmelCase : List[Any] = getattr(UpperCamelCase , """handle_key""" , [] ) handle += [key] setattr(UpperCamelCase , """handle_key""" , UpperCamelCase ) return func return decorator def _snake_case ( *UpperCamelCase : List[str] ): def decorator(UpperCamelCase : Union[str, Any] ): UpperCAmelCase : Optional[Any] = getattr(UpperCamelCase , """handle_key""" , [] ) handle += keys setattr(UpperCamelCase , """handle_key""" , UpperCamelCase ) return func return decorator class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ): def __new__( cls , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict: '''simple docstring''' UpperCAmelCase : List[Any] = super().__new__(cls , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not hasattr(_SCREAMING_SNAKE_CASE , """key_handler""" ): setattr(_SCREAMING_SNAKE_CASE , """key_handler""" , {} ) setattr(_SCREAMING_SNAKE_CASE , """handle_input""" , KeyHandler.handle_input ) for value in attrs.values(): UpperCAmelCase : List[str] = getattr(_SCREAMING_SNAKE_CASE , """handle_key""" , [] ) for key in handled_keys: UpperCAmelCase : Optional[int] = value return new_cls @staticmethod def SCREAMING_SNAKE_CASE ( cls ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : str = get_character() if char != KEYMAP["undefined"]: UpperCAmelCase : List[Any] = ord(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : int = cls.key_handler.get(_SCREAMING_SNAKE_CASE ) if handler: UpperCAmelCase : int = char return handler(cls ) else: return None def _snake_case ( cls : Union[str, Any] ): return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
76
0
"""simple docstring""" class A_ : '''simple docstring''' def __init__( self , lowercase_ ): """simple docstring""" # we need a list not a string, so do something to change the type UpperCAmelCase_ : Any = arr.split("," ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : str = [int(self.array[0] )] * len(self.array ) UpperCAmelCase_ : Optional[Any] = [int(self.array[0] )] * len(self.array ) for i in range(1 , len(self.array ) ): UpperCAmelCase_ : str = max( int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) ) UpperCAmelCase_ : Dict = max(sum_value[i] , rear[i - 1] ) return rear[len(self.array ) - 1] if __name__ == "__main__": _a = input('please input some numbers:') _a = SubArray(whole_array) _a = array.solve_sub_array() print(('the results is:', re))
61
"""simple docstring""" import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class A_ : '''simple docstring''' def __init__( self , lowercase_ , lowercase_=13 , lowercase_=30 , lowercase_=2 , lowercase_=3 , lowercase_=True , lowercase_=True , lowercase_=32 , lowercase_=5 , lowercase_=4 , lowercase_=37 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=10 , lowercase_=0.02 , lowercase_=3 , lowercase_=None , lowercase_=2 , ): """simple docstring""" UpperCAmelCase_ : List[str] = parent UpperCAmelCase_ : int = batch_size UpperCAmelCase_ : int = image_size UpperCAmelCase_ : List[Any] = patch_size UpperCAmelCase_ : Any = num_channels UpperCAmelCase_ : Optional[int] = is_training UpperCAmelCase_ : Union[str, Any] = use_labels UpperCAmelCase_ : Union[str, Any] = hidden_size UpperCAmelCase_ : str = num_hidden_layers UpperCAmelCase_ : List[str] = num_attention_heads UpperCAmelCase_ : str = intermediate_size UpperCAmelCase_ : str = hidden_act UpperCAmelCase_ : List[Any] = hidden_dropout_prob UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob UpperCAmelCase_ : str = type_sequence_label_size UpperCAmelCase_ : str = initializer_range UpperCAmelCase_ : Union[str, Any] = scope UpperCAmelCase_ : str = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) UpperCAmelCase_ : int = (image_size // patch_size) ** 2 UpperCAmelCase_ : Optional[Any] = num_patches + 2 def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : Tuple = None if self.use_labels: UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ : Union[str, Any] = self.get_config() return config, pixel_values, labels def UpperCamelCase__ ( self ): """simple docstring""" return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = DeiTModel(config=lowercase_ ) 
model.to(lowercase_ ) model.eval() UpperCAmelCase_ : List[Any] = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = DeiTForMaskedImageModeling(config=lowercase_ ) model.to(lowercase_ ) model.eval() UpperCAmelCase_ : List[Any] = model(lowercase_ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images UpperCAmelCase_ : List[str] = 1 UpperCAmelCase_ : Optional[Any] = DeiTForMaskedImageModeling(lowercase_ ) model.to(lowercase_ ) model.eval() UpperCAmelCase_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase_ : Optional[int] = model(lowercase_ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Tuple = self.type_sequence_label_size UpperCAmelCase_ : Union[str, Any] = DeiTForImageClassification(lowercase_ ) model.to(lowercase_ ) model.eval() UpperCAmelCase_ : List[str] = model(lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase_ : Union[str, Any] = 1 UpperCAmelCase_ : Optional[int] = DeiTForImageClassification(lowercase_ ) model.to(lowercase_ ) model.eval() UpperCAmelCase_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase_ : List[Any] = model(lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[str] = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Dict = config_and_inputs UpperCAmelCase_ : Optional[int] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class A_ (lowercase__ ,lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ : Tuple = ( { """feature-extraction""": DeiTModel, """image-classification""": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ : List[Any] = False SCREAMING_SNAKE_CASE__ : Optional[Any] = False SCREAMING_SNAKE_CASE__ : List[str] = False def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Any = DeiTModelTester(self ) UpperCAmelCase_ : Optional[int] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 ) def UpperCamelCase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use inputs_embeds" ) def UpperCamelCase__ ( self ): """simple docstring""" pass def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : List[Any] = model_class(lowercase_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) 
UpperCAmelCase_ : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Dict = model_class(lowercase_ ) UpperCAmelCase_ : Optional[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : str = [*signature.parameters.keys()] UpperCAmelCase_ : Optional[int] = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_=False ): """simple docstring""" UpperCAmelCase_ : Tuple = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def UpperCamelCase__ ( self ): """simple docstring""" if not self.model_tester.is_training: return UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Union[str, Any] = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(lowercase_ ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue UpperCAmelCase_ : Optional[int] = model_class(lowercase_ ) model.to(lowercase_ ) model.train() UpperCAmelCase_ : List[Any] = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ ) UpperCAmelCase_ : Dict = model(**lowercase_ ).loss loss.backward() def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return UpperCAmelCase_ : Dict = False UpperCAmelCase_ : Optional[int] = True for model_class in self.all_model_classes: if model_class in get_values(lowercase_ ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue UpperCAmelCase_ : List[str] = model_class(lowercase_ ) model.gradient_checkpointing_enable() model.to(lowercase_ ) model.train() UpperCAmelCase_ : Optional[int] = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ ) UpperCAmelCase_ : Any = model(**lowercase_ ).loss loss.backward() def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Dict = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, 
{"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(lowercase_ ), *get_values(lowercase_ ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ): UpperCAmelCase_ : str = problem_type["title"] UpperCAmelCase_ : List[Any] = problem_type["num_labels"] UpperCAmelCase_ : Union[str, Any] = model_class(lowercase_ ) model.to(lowercase_ ) model.train() UpperCAmelCase_ : int = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ ) if problem_type["num_labels"] > 1: UpperCAmelCase_ : List[Any] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] ) UpperCAmelCase_ : Tuple = inputs["labels"].to(problem_type["dtype"] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=lowercase_ ) as warning_list: UpperCAmelCase_ : List[str] = model(**lowercase_ ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F"""Something is going wrong in the regression problem: intercepted {w.message}""" ) loss.backward() @slow def UpperCamelCase__ ( self ): """simple docstring""" for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Union[str, Any] = DeiTModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) def __a ( ): UpperCAmelCase_ : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class A_ (unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase__ ( self ): """simple docstring""" return ( DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" ) if is_vision_available() else None ) @slow def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to( lowercase_ ) UpperCAmelCase_ : List[str] = self.default_image_processor UpperCAmelCase_ : List[str] = prepare_img() UpperCAmelCase_ : int = image_processor(images=lowercase_ , return_tensors="pt" ).to(lowercase_ ) # forward pass with torch.no_grad(): UpperCAmelCase_ : Dict = model(**lowercase_ ) # verify the logits UpperCAmelCase_ : List[str] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , lowercase_ ) UpperCAmelCase_ : str = torch.tensor([-1.02_66, 0.19_12, -1.28_61] ).to(lowercase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) ) @slow @require_accelerate @require_torch_gpu def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[str] = DeiTModel.from_pretrained( "facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" ) UpperCAmelCase_ : str = self.default_image_processor UpperCAmelCase_ : Union[str, Any] = prepare_img() UpperCAmelCase_ : List[Any] = image_processor(images=lowercase_ , return_tensors="pt" ) UpperCAmelCase_ : List[str] = inputs.pixel_values.to(lowercase_ ) # forward pass to make sure inference works in 
fp16 with torch.no_grad(): UpperCAmelCase_ : int = model(lowercase_ )
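The problem-type test above hinges on matching loss functions to label shapes and dtypes. A minimal torch sketch of that dispatch, standalone and without transformers involved:

import torch
import torch.nn.functional as F

logits = torch.randn(4, 1)
labels_reg = torch.randn(4, 1)           # regression: float labels, same shape as logits
labels_cls = torch.randint(0, 2, (4,))   # single-label classification: long class ids

print(F.mse_loss(logits, labels_reg))                    # regression loss
print(F.cross_entropy(torch.randn(4, 2), labels_cls))    # classification loss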
61
1
"""simple docstring""" import warnings from ...utils import logging from .image_processing_beit import BeitImageProcessor UpperCAmelCase = logging.get_logger(__name__) class UpperCAmelCase_ ( _lowercase): def __init__( self : Optional[Any] , *__UpperCamelCase : List[Any] , **__UpperCamelCase : Union[str, Any] ) -> None: warnings.warn( '''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use BeitImageProcessor instead.''' , __UpperCamelCase , ) super().__init__(*__UpperCamelCase , **__UpperCamelCase )
54
"""simple docstring""" from __future__ import annotations import math def lowercase ( a__ : int ) -> list[int]: if num <= 0: _UpperCamelCase = F'''{num}: Invalid input, please enter a positive integer.''' raise ValueError(a__ ) _UpperCamelCase = [True] * (num + 1) _UpperCamelCase = [] _UpperCamelCase = 2 _UpperCamelCase = int(math.sqrt(a__ ) ) while start <= end: # If start is a prime if sieve[start] is True: prime.append(a__ ) # Set multiples of start be False for i in range(start * start , num + 1 , a__ ): if sieve[i] is True: _UpperCamelCase = False start += 1 for j in range(end + 1 , num + 1 ): if sieve[j] is True: prime.append(a__ ) return prime if __name__ == "__main__": print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
54
1
import logging

from transformers.configuration_utils import PretrainedConfig


logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """A BertConfig replica with additional parameters for pruning/masking configuration."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
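A stripped-down sketch of the config-subclass pattern above, with a plain base class standing in for PretrainedConfig: defaults live in __init__, values are stored as attributes, and extra kwargs are forwarded to the base class.

class BaseConfig:
    def __init__(self, pad_token_id=0, **kwargs):
        self.pad_token_id = pad_token_id


class MaskedConfig(BaseConfig):
    model_type = "masked_bert"

    def __init__(self, vocab_size=30522, pruning_method="topK", **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.pruning_method = pruning_method


cfg = MaskedConfig(pruning_method="magnitude", pad_token_id=1)
print(cfg.model_type, cfg.vocab_size, cfg.pruning_method, cfg.pad_token_id)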
345
import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: Union[str, Any] ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def A__ ( self: List[str] ) -> Dict: UpperCAmelCase_ : Union[str, Any] = 1 UpperCAmelCase_ : Tuple = 3 UpperCAmelCase_ : Optional[Any] = (32, 32) UpperCAmelCase_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(lowerCamelCase_ ) return image @property def A__ ( self: List[Any] ) -> Optional[Any]: torch.manual_seed(0 ) UpperCAmelCase_ : int = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,) return model @property def A__ ( self: str ) -> List[str]: torch.manual_seed(0 ) UpperCAmelCase_ : Optional[int] = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,) return model @property def A__ ( self: Optional[int] ) -> int: torch.manual_seed(0 ) UpperCAmelCase_ : Dict = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,) return CLIPTextModel(lowerCamelCase_ ) @property def A__ ( self: Tuple ) -> Tuple: def extract(*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: str ): class _snake_case : '''simple docstring''' def __init__( self: List[Any] ) -> Optional[Any]: UpperCAmelCase_ : List[str] = torch.ones([0] ) def A__ ( self: List[Any] ,lowerCamelCase_: str ) -> int: self.pixel_values.to(lowerCamelCase_ ) return self return Out() return extract def A__ ( self: Union[str, Any] ) -> Tuple: UpperCAmelCase_ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ : int = self.dummy_cond_unet UpperCAmelCase_ : Optional[Any] = DDIMScheduler( beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule="""scaled_linear""" ,clip_sample=lowerCamelCase_ ,set_alpha_to_one=lowerCamelCase_ ,) UpperCAmelCase_ : str = self.dummy_vae UpperCAmelCase_ : List[str] = self.dummy_text_encoder UpperCAmelCase_ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # make sure here that pndm scheduler skips prk UpperCAmelCase_ : str = StableDiffusionPipeline( unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,) UpperCAmelCase_ : List[str] = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : List[str] = """A painting of a squirrel eating a burger""" UpperCAmelCase_ : str = 
torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCAmelCase_ : int = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ) UpperCAmelCase_ : List[Any] = output.images UpperCAmelCase_ : str = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCAmelCase_ : Dict = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase_ ,)[0] UpperCAmelCase_ : int = image[0, -3:, -3:, -1] UpperCAmelCase_ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase_ : Tuple = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def A__ ( self: Optional[Any] ) -> Any: UpperCAmelCase_ : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ : Dict = self.dummy_cond_unet UpperCAmelCase_ : List[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ ) UpperCAmelCase_ : str = self.dummy_vae UpperCAmelCase_ : Union[str, Any] = self.dummy_text_encoder UpperCAmelCase_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # make sure here that pndm scheduler skips prk UpperCAmelCase_ : Any = StableDiffusionPipeline( unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,) UpperCAmelCase_ : int = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = """A painting of a squirrel eating a burger""" UpperCAmelCase_ : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ) UpperCAmelCase_ : str = output.images UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCAmelCase_ : int = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase_ ,)[0] UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1] UpperCAmelCase_ : List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase_ : Tuple = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def A__ ( self: str ) -> Dict: UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained( """hf-internal-testing/tiny-stable-diffusion-lms-pipe""" ,safety_checker=lowerCamelCase_ ) assert isinstance(lowerCamelCase_ ,lowerCamelCase_ ) assert isinstance(pipe.scheduler ,lowerCamelCase_ ) assert pipe.safety_checker is None UpperCAmelCase_ : List[Any] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowerCamelCase_ ) UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ ) # sanity 
check that the pipeline still works assert pipe.safety_checker is None UpperCAmelCase_ : Optional[int] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" ) def A__ ( self: List[str] ) -> Any: UpperCAmelCase_ : Tuple = self.dummy_cond_unet UpperCAmelCase_ : Dict = PNDMScheduler(skip_prk_steps=lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = self.dummy_vae UpperCAmelCase_ : List[str] = self.dummy_text_encoder UpperCAmelCase_ : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # put models in fp16 UpperCAmelCase_ : Optional[Any] = unet.half() UpperCAmelCase_ : Optional[int] = vae.half() UpperCAmelCase_ : int = bert.half() # make sure here that pndm scheduler skips prk UpperCAmelCase_ : Any = StableDiffusionPipeline( unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,) UpperCAmelCase_ : List[Any] = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : Tuple = """A painting of a squirrel eating a burger""" UpperCAmelCase_ : Optional[int] = sd_pipe([prompt] ,num_inference_steps=2 ,output_type="""np""" ).images assert image.shape == (1, 64, 64, 3) @nightly @require_torch_gpu class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: Optional[int] ) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def A__ ( self: List[str] ) -> List[Any]: UpperCAmelCase_ : Tuple = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) UpperCAmelCase_ : str = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : str = ( """portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle""" """ coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with""" """ anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and""" """ children from bahnhof zoo, detailed """ ) UpperCAmelCase_ : Optional[int] = 4003660346 UpperCAmelCase_ : int = 7 # without safety guidance (sld_guidance_scale = 0) UpperCAmelCase_ : Dict = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,) UpperCAmelCase_ : Optional[int] = output.images UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : Dict = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 # without safety guidance (strong configuration) UpperCAmelCase_ : Union[str, Any] = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : Any = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,) 
UpperCAmelCase_ : Tuple = output.images UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : str = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def A__ ( self: Optional[int] ) -> Any: UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=lowerCamelCase_ ) UpperCAmelCase_ : Any = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) UpperCAmelCase_ : str = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : Any = """padme amidala taking a bath artwork, safe for work, no nudity""" UpperCAmelCase_ : List[Any] = 2734971755 UpperCAmelCase_ : Optional[Any] = 7 UpperCAmelCase_ : int = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,) UpperCAmelCase_ : Dict = output.images UpperCAmelCase_ : Tuple = image[0, -3:, -3:, -1] UpperCAmelCase_ : Optional[Any] = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 UpperCAmelCase_ : Any = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : Tuple = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,) UpperCAmelCase_ : Dict = output.images UpperCAmelCase_ : List[Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : Tuple = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def A__ ( self: Union[str, Any] ) -> int: UpperCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ) UpperCAmelCase_ : List[str] = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : Any = ( """the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. 
c.""" """ leyendecker""" ) UpperCAmelCase_ : Optional[Any] = 1044355234 UpperCAmelCase_ : List[str] = 12 UpperCAmelCase_ : List[Any] = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,) UpperCAmelCase_ : Any = output.images UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1] UpperCAmelCase_ : Optional[Any] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7 UpperCAmelCase_ : Optional[int] = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,) UpperCAmelCase_ : List[str] = output.images UpperCAmelCase_ : Any = image[0, -3:, -3:, -1] UpperCAmelCase_ : Any = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
345
1
import numpy as np import torch from torch.utils.data import Dataset from utils import logger class __a ( A__ ): def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' UpperCamelCase__ : List[Any] = params UpperCamelCase__ : Tuple = np.array(SCREAMING_SNAKE_CASE ) UpperCamelCase__ : int = np.array([len(SCREAMING_SNAKE_CASE ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' return (self.token_ids[index], self.lengths[index]) def __len__( self : Optional[Any] ): '''simple docstring''' return len(self.lengths ) def __lowercase ( self : Union[str, Any] ): '''simple docstring''' assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def __lowercase ( self : Any ): '''simple docstring''' UpperCamelCase__ : int = self.params.max_model_input_size UpperCamelCase__ : str = self.lengths > max_len logger.info(F'Splitting {sum(SCREAMING_SNAKE_CASE )} too long sequences.' ) def divide_chunks(SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int] ): return [l[i : i + n] for i in range(0 , len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )] UpperCamelCase__ : Optional[int] = [] UpperCamelCase__ : Dict = [] if self.params.mlm: UpperCamelCase__ , UpperCamelCase__ : Tuple = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"] else: UpperCamelCase__ , UpperCamelCase__ : Tuple = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: UpperCamelCase__ : Tuple = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: UpperCamelCase__ : Optional[int] = np.insert(SCREAMING_SNAKE_CASE , 0 , SCREAMING_SNAKE_CASE ) if sub_s[-1] != sep_id: UpperCamelCase__ : Optional[Any] = np.insert(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) assert len(SCREAMING_SNAKE_CASE ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(SCREAMING_SNAKE_CASE ) new_tok_ids.extend(SCREAMING_SNAKE_CASE ) new_lengths.extend([len(SCREAMING_SNAKE_CASE ) for l in sub_seqs] ) UpperCamelCase__ : Union[str, Any] = np.array(SCREAMING_SNAKE_CASE ) UpperCamelCase__ : int = np.array(SCREAMING_SNAKE_CASE ) def __lowercase ( self : Dict ): '''simple docstring''' UpperCamelCase__ : str = len(self ) UpperCamelCase__ : Optional[int] = self.lengths > 11 UpperCamelCase__ : List[str] = self.token_ids[indices] UpperCamelCase__ : int = self.lengths[indices] UpperCamelCase__ : Any = len(self ) logger.info(F'Remove {init_size - new_size} too short (<=11 tokens) sequences.' 
) def __lowercase ( self : Optional[Any] ): '''simple docstring''' if "unk_token" not in self.params.special_tok_ids: return else: UpperCamelCase__ : List[Any] = self.params.special_tok_ids["unk_token"] UpperCamelCase__ : Dict = len(self ) UpperCamelCase__ : Union[str, Any] = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) UpperCamelCase__ : Dict = (unk_occs / self.lengths) < 0.5 UpperCamelCase__ : Union[str, Any] = self.token_ids[indices] UpperCamelCase__ : Optional[Any] = self.lengths[indices] UpperCamelCase__ : Any = len(self ) logger.info(F'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).' ) def __lowercase ( self : Dict ): '''simple docstring''' if not self.params.is_master: return logger.info(F'{len(self )} sequences' ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def __lowercase ( self : str , SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' UpperCamelCase__ : Tuple = [t[0] for t in batch] UpperCamelCase__ : Any = [t[1] for t in batch] assert len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE ) # Max for paddings UpperCamelCase__ : Union[str, Any] = max(SCREAMING_SNAKE_CASE ) # Pad token ids if self.params.mlm: UpperCamelCase__ : List[Any] = self.params.special_tok_ids["pad_token"] else: UpperCamelCase__ : Union[str, Any] = self.params.special_tok_ids["unk_token"] UpperCamelCase__ : Tuple = [list(t.astype(SCREAMING_SNAKE_CASE ) ) + [pad_idx] * (max_seq_len_ - len(SCREAMING_SNAKE_CASE )) for t in token_ids] assert len(tk_ ) == len(SCREAMING_SNAKE_CASE ) assert all(len(SCREAMING_SNAKE_CASE ) == max_seq_len_ for t in tk_ ) UpperCamelCase__ : int = torch.tensor(tk_ ) # (bs, max_seq_len_) UpperCamelCase__ : List[Any] = torch.tensor(SCREAMING_SNAKE_CASE ) # (bs) return tk_t, lg_t
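A compact sketch of the pad-to-max collate implemented in batch_sequences above, without the dataset class around it: pad every sequence to the batch maximum and return the token tensor with the original lengths.

import torch


def pad_batch(seqs, pad_idx=0):
    lengths = [len(s) for s in seqs]
    max_len = max(lengths)
    padded = [list(s) + [pad_idx] * (max_len - len(s)) for s in seqs]
    return torch.tensor(padded), torch.tensor(lengths)


tokens, lengths = pad_batch([[5, 6, 7], [8, 9]])
print(tokens)   # tensor([[5, 6, 7], [8, 9, 0]])
print(lengths)  # tensor([3, 2])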
196
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Find the shortest path between `start` and `goal` nodes with BFS."""
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Find the shortest path distance between `start` and `target` nodes with BFS."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = dist[node] if dist[target] == -1 else min(dist[target], dist[node])
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
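list.pop(0) in both functions above is O(n) per dequeue; a deque-based variant of the path search keeps the traversal equivalent with O(1) pops (it marks nodes explored on enqueue, a standard BFS variation). Uses the same demo_graph as above.

from collections import deque


def bfs_shortest_path_deque(graph, start, goal):
    explored, queue = {start}, deque([[start]])
    while queue:
        path = queue.popleft()                 # O(1), unlike list.pop(0)
        for neighbour in graph[path[-1]]:
            if neighbour == goal:
                return path + [neighbour]
            if neighbour not in explored:      # mark on enqueue to avoid duplicates
                explored.add(neighbour)
                queue.append(path + [neighbour])
    return []


print(bfs_shortest_path_deque(demo_graph, "G", "D"))  # ['G', 'C', 'A', 'B', 'D']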
196
1
"""simple docstring""" import re from filelock import FileLock try: import nltk _lowercase : Optional[int] = True except (ImportError, ModuleNotFoundError): _lowercase : Optional[Any] = False if NLTK_AVAILABLE: with FileLock(".lock") as lock: nltk.download("punkt", quiet=True) def snake_case__ ( __lowerCamelCase : str ): """simple docstring""" re.sub('''<n>''' , '''''' , __lowerCamelCase ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(__lowerCamelCase ) )
238
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowerCamelCase = {"""configuration_xglm""": ["""XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XGLMConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase = ["""XGLMTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase = ["""XGLMTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase = [ """XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""", """XGLMForCausalLM""", """XGLMModel""", """XGLMPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase = [ """FlaxXGLMForCausalLM""", """FlaxXGLMModel""", """FlaxXGLMPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase = [ """TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFXGLMForCausalLM""", """TFXGLMModel""", """TFXGLMPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys __lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
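A miniature of the lazy-import scaffolding above: a module-level __getattr__ (PEP 562) resolving names on first access, standing in for transformers' _LazyModule. The module and symbol names here are arbitrary examples, not the real import structure.

import importlib

_import_structure = {"math": ["sqrt"], "json": ["dumps"]}


def __getattr__(name):
    # called only when `name` is not already defined in the module; resolves it lazily
    for module_name, names in _import_structure.items():
        if name in names:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)


print(__getattr__("sqrt")(9.0))  # 3.0 -- normally triggered by an attribute access on the package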
59
0
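The _LazyModule import plumbing above defers heavy submodule imports until an attribute is actually requested. Below is a rough, self-contained sketch of that idea using PEP 562's module-level __getattr__; the submodule and class names are placeholders, and this is a deliberate simplification of what transformers' _LazyModule really does:

# lazy_pkg/__init__.py -- illustrative sketch only
import importlib

_SUBMODULES = {"tokenization": ["XGLMTokenizer"], "modeling": ["XGLMModel"]}
_ATTR_TO_MODULE = {attr: mod for mod, attrs in _SUBMODULES.items() for attr in attrs}


def __getattr__(name):
    # Import the owning submodule only on first access to the attribute.
    if name in _ATTR_TO_MODULE:
        module = importlib.import_module(f".{_ATTR_TO_MODULE[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

The payoff is the same as in the row above: importing the package stays cheap, and torch, TF, or Flax is only pulled in by the branch you actually touch.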
import copy from typing import Dict, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING from ..detr import DetrConfig from ..swin import SwinConfig __lowerCamelCase = { """facebook/maskformer-swin-base-ade""": ( """https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json""" ) # See all MaskFormer models at https://huggingface.co/models?filter=maskformer } __lowerCamelCase = logging.get_logger(__name__) class UpperCAmelCase ( A_ ): A__ : List[str] = "maskformer" A__ : Any = {"hidden_size": "mask_feature_size"} A__ : List[str] = ["resnet", "swin"] A__ : Any = ["detr"] def __init__(self : Dict , snake_case__ : int = 2_56 , snake_case__ : int = 2_56 , snake_case__ : float = 0.1 , snake_case__ : bool = False , snake_case__ : Optional[Dict] = None , snake_case__ : Optional[Dict] = None , snake_case__ : float = 0.02 , snake_case__ : float = 1.0 , snake_case__ : float = 1.0 , snake_case__ : float = 1.0 , snake_case__ : float = 20.0 , snake_case__ : Optional[bool] = None , **snake_case__ : Optional[Any] , ) -> Optional[int]: '''simple docstring''' if backbone_config is None: # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k snake_case : Dict = SwinConfig( image_size=3_84 , in_channels=3 , patch_size=4 , embed_dim=1_28 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=["stage1", "stage2", "stage3", "stage4"] , ) if isinstance(snake_case__ , snake_case__ ): snake_case : Any = backbone_config.pop("model_type" ) snake_case : Tuple = CONFIG_MAPPING[backbone_model_type] snake_case : int = config_class.from_dict(snake_case__ ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. 
""" f"""Supported model types: {','.join(self.backbones_supported )}""" ) if decoder_config is None: # fall back to https://huggingface.co/facebook/detr-resnet-50 snake_case : Union[str, Any] = DetrConfig() else: # verify that the decoder is supported snake_case : List[Any] = ( decoder_config.pop("model_type" ) if isinstance(snake_case__ , snake_case__ ) else decoder_config.model_type ) if decoder_type not in self.decoders_supported: raise ValueError( f"""Transformer Decoder {decoder_type} not supported, please use one of""" f""" {','.join(self.decoders_supported )}""" ) if isinstance(snake_case__ , snake_case__ ): snake_case : List[str] = CONFIG_MAPPING[decoder_type] snake_case : str = config_class.from_dict(snake_case__ ) snake_case : List[str] = backbone_config snake_case : Tuple = decoder_config # main feature dimension for the model snake_case : str = fpn_feature_size snake_case : Any = mask_feature_size # initializer snake_case : List[Any] = init_std snake_case : List[str] = init_xavier_std # Hungarian matcher && loss snake_case : str = cross_entropy_weight snake_case : Any = dice_weight snake_case : Optional[Any] = mask_weight snake_case : Dict = use_auxiliary_loss snake_case : List[str] = no_object_weight snake_case : Optional[int] = output_auxiliary_logits snake_case : Tuple = self.decoder_config.encoder_attention_heads snake_case : Optional[int] = self.decoder_config.num_hidden_layers super().__init__(**snake_case__ ) @classmethod def _SCREAMING_SNAKE_CASE (cls : Optional[Any] , snake_case__ : PretrainedConfig , snake_case__ : PretrainedConfig , **snake_case__ : Tuple ) -> Tuple: '''simple docstring''' return cls( backbone_config=snake_case__ , decoder_config=snake_case__ , **snake_case__ , ) def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Dict[str, any]: '''simple docstring''' snake_case : Dict = copy.deepcopy(self.__dict__ ) snake_case : str = self.backbone_config.to_dict() snake_case : Tuple = self.decoder_config.to_dict() snake_case : Optional[Any] = self.__class__.model_type return output
10
import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **calc_rouge_kwargs):
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **calc_rouge_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
10
1
'''simple docstring'''

import argparse

import torch
from torch import nn

from transformers import MBartConfig, MBartForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is a mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
53
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
    from .camera import create_pan_cameras
    from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
    from .renderer import (
        BoundingBoxVolume,
        ImportanceRaySampler,
        MLPNeRFModelOutput,
        MLPNeRSTFModel,
        ShapEParamsProjModel,
        ShapERenderer,
        StratifiedRaySampler,
        VoidNeRFModel,
    )
199
0
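make_linear_from_emb in the conversion script above turns the shared embedding matrix into the LM head. The self-contained sketch below (toy sizes and invented names) shows the shape logic; note it ties the parameter outright, whereas the script copies `.data` into a fresh layer:

import torch
from torch import nn

emb = nn.Embedding(num_embeddings=100, embedding_dim=16)
head = nn.Linear(in_features=16, out_features=100, bias=False)
head.weight = emb.weight  # share one (100, 16) parameter between input and output

hidden = torch.randn(2, 16)
print(head(hidden).shape)  # torch.Size([2, 100])
assert head.weight.data_ptr() == emb.weight.data_ptr()  # tied, not copied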
# # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! # #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def a_ ( *__lowercase : Any ) -> Optional[int]: with open(__lowercase , 'r' ) as fh: fcntl.flock(__lowercase , fcntl.LOCK_EX ) try: print(*__lowercase ) finally: fcntl.flock(__lowercase , fcntl.LOCK_UN ) _lowerCamelCase : Dict = int(os.environ['''LOCAL_RANK''']) torch.cuda.set_device(local_rank) _lowerCamelCase : Optional[Any] = torch.device('''cuda''', local_rank) _lowerCamelCase : Union[str, Any] = socket.gethostname() _lowerCamelCase : Optional[Any] = F'[{hostname}-{local_rank}]' try: # test distributed dist.init_process_group('''nccl''') dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank _lowerCamelCase : Any = dist.get_rank() _lowerCamelCase : Optional[Any] = dist.get_world_size() printflock(F'{gpu} is OK (global rank: {rank}/{world_size})') dist.barrier() if rank == 0: printflock(F'pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}') except Exception: printflock(F'{gpu} is broken') raise
370
import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process _lowerCamelCase : int = logging.getLogger(__name__) @dataclass class SCREAMING_SNAKE_CASE__ : '''simple docstring''' _UpperCAmelCase : str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) _UpperCAmelCase : Optional[str] = field( default=UpperCAmelCase ,metadata={"help": "Pretrained config name or path if not the same as model_name"} ) _UpperCAmelCase : Optional[str] = field( default="NER" ,metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"} ) _UpperCAmelCase : Optional[str] = field( default=UpperCAmelCase ,metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) _UpperCAmelCase : bool = field(default=UpperCAmelCase ,metadata={"help": "Set this flag to use fast tokenization."} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _UpperCAmelCase : Optional[str] = field( default=UpperCAmelCase ,metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ,) @dataclass class SCREAMING_SNAKE_CASE__ : '''simple docstring''' _UpperCAmelCase : str = field( metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."} ) _UpperCAmelCase : Optional[str] = field( default=UpperCAmelCase ,metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."} ,) _UpperCAmelCase : int = field( default=1_2_8 ,metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } ,) _UpperCAmelCase : bool = field( default=UpperCAmelCase ,metadata={"help": "Overwrite the cached training and evaluation sets"} ) def a_ ( ) -> Optional[int]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _snake_case , _snake_case , _snake_case = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _snake_case , _snake_case , _snake_case = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' ' --overwrite_output_dir to overcome.' 
) _snake_case = import_module('tasks' ) try: _snake_case = getattr(__lowercase , model_args.task_type ) _snake_case = token_classification_task_clazz() except AttributeError: raise ValueError( f'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. ''' f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , __lowercase ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task _snake_case = token_classification_task.get_labels(data_args.labels ) _snake_case = dict(enumerate(__lowercase ) ) _snake_case = len(__lowercase ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _snake_case = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__lowercase , idalabel=__lowercase , labelaid={label: i for i, label in enumerate(__lowercase )} , cache_dir=model_args.cache_dir , ) _snake_case = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , ) _snake_case = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , ) # Get datasets _snake_case = ( TokenClassificationDataset( token_classification_task=__lowercase , data_dir=data_args.data_dir , tokenizer=__lowercase , labels=__lowercase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) _snake_case = ( TokenClassificationDataset( token_classification_task=__lowercase , data_dir=data_args.data_dir , tokenizer=__lowercase , labels=__lowercase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def align_predictions(__lowercase : np.ndarray , __lowercase : np.ndarray ) -> Tuple[List[int], List[int]]: _snake_case = np.argmax(__lowercase , axis=2 ) _snake_case , _snake_case = preds.shape _snake_case = [[] for _ in range(__lowercase )] _snake_case = [[] for _ in range(__lowercase )] for i in range(__lowercase ): for j in range(__lowercase ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(__lowercase : EvalPrediction ) -> Dict: _snake_case , 
_snake_case = align_predictions(p.predictions , p.label_ids ) return { "accuracy_score": accuracy_score(__lowercase , __lowercase ), "precision": precision_score(__lowercase , __lowercase ), "recall": recall_score(__lowercase , __lowercase ), "f1": fa_score(__lowercase , __lowercase ), } # Data collator _snake_case = DataCollatorWithPadding(__lowercase , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer _snake_case = Trainer( model=__lowercase , args=__lowercase , train_dataset=__lowercase , eval_dataset=__lowercase , compute_metrics=__lowercase , data_collator=__lowercase , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation _snake_case = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) _snake_case = trainer.evaluate() _snake_case = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_process_zero(): with open(__lowercase , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' , __lowercase , __lowercase ) writer.write('%s = %s\n' % (key, value) ) results.update(__lowercase ) # Predict if training_args.do_predict: _snake_case = TokenClassificationDataset( token_classification_task=__lowercase , data_dir=data_args.data_dir , tokenizer=__lowercase , labels=__lowercase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , ) _snake_case , _snake_case , _snake_case = trainer.predict(__lowercase ) _snake_case , _snake_case = align_predictions(__lowercase , __lowercase ) _snake_case = os.path.join(training_args.output_dir , 'test_results.txt' ) if trainer.is_world_process_zero(): with open(__lowercase , 'w' ) as writer: for key, value in metrics.items(): logger.info(' %s = %s' , __lowercase , __lowercase ) writer.write('%s = %s\n' % (key, value) ) # Save predictions _snake_case = os.path.join(training_args.output_dir , 'test_predictions.txt' ) if trainer.is_world_process_zero(): with open(__lowercase , 'w' ) as writer: with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f: token_classification_task.write_predictions_to_file(__lowercase , __lowercase , __lowercase ) return results def a_ ( __lowercase : Optional[Any] ) -> Optional[int]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
130
0
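One step in the NER training script above that is easy to misread: align_predictions drops every position whose gold label equals CrossEntropyLoss's ignore_index (-100, used for padding and subword continuations) before seqeval scoring. A toy numpy illustration of just that masking step, with an invented label map and logits:

import numpy as np

label_map = {0: "O", 1: "B-PER", 2: "I-PER"}  # toy mapping
IGNORE_INDEX = -100  # nn.CrossEntropyLoss().ignore_index

logits = np.array([[[2.0, 0.1, 0.1], [0.1, 3.0, 0.2], [0.5, 0.5, 0.5]]])  # (batch=1, seq=3, labels=3)
label_ids = np.array([[0, 1, IGNORE_INDEX]])  # last position is padding

preds = np.argmax(logits, axis=2)
preds_list = [
    [label_map[p] for p, l in zip(preds[i], label_ids[i]) if l != IGNORE_INDEX]
    for i in range(label_ids.shape[0])
]
print(preds_list)  # [['O', 'B-PER']]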
from __future__ import annotations


def all_unique(nums: list[int]) -> bool:
    return len(set(nums)) == len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
119
from __future__ import annotations

from itertools import permutations
from random import randint
from timeit import repeat


def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    target = randint(-5000, 5000)
    return (arr, target)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
119
1
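Besides the brute-force and two-pointer versions timed above, a third common approach fixes one element and looks up the complement of each pair in a hash set. This is my own hedged sketch, not part of the dataset row; the function name and test values are invented:

def triplet_sum_hashset(arr: list[int], target: int) -> tuple[int, ...]:
    # O(n^2) time like the two-pointer version, but no sort; O(n) extra space.
    for i, first in enumerate(arr):
        seen = set()
        for second in arr[i + 1 :]:
            third = target - first - second
            if third in seen:
                return tuple(sorted((first, second, third)))
            seen.add(second)
    return (0, 0, 0)


print(triplet_sum_hashset([13, 29, 7, 23, 5], 35))  # (5, 7, 23)

The trade-off versus triplet_sum2 is that the input order is preserved (no in-place sort), at the cost of the per-iteration set.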
"""simple docstring""" from typing import Any def _snake_case ( lowercase__ ): if not input_list: return [] _lowerCamelCase : Any = [input_list.count(lowercase__ ) for value in input_list] _lowerCamelCase : Dict = max(lowercase__ ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(lowercase__ ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
363
"""simple docstring""" from __future__ import annotations def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): _lowerCamelCase : Tuple = list(range(len(lowercase__ ) ) ) _lowerCamelCase : Any = [v / w for v, w in zip(lowercase__ , lowercase__ )] index.sort(key=lambda lowercase__ : ratio[i] , reverse=lowercase__ ) _lowerCamelCase : float = 0 _lowerCamelCase : list[float] = [0] * len(lowercase__ ) for i in index: if weight[i] <= capacity: _lowerCamelCase : int = 1 max_value += value[i] capacity -= weight[i] else: _lowerCamelCase : Any = capacity / weight[i] max_value += value[i] * capacity / weight[i] break return max_value, fractions if __name__ == "__main__": import doctest doctest.testmod()
12
0
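The mode function above calls list.count once per element, which is O(n^2) overall; collections.Counter produces the same answer in one pass. A minimal sketch (function name invented for illustration):

from collections import Counter


def mode_counter(values: list) -> list:
    if not values:
        return []
    counts = Counter(values)
    top = max(counts.values())
    return sorted(v for v, c in counts.items() if c == top)


print(mode_counter([2, 3, 4, 5, 3, 4, 2]))  # [2, 3, 4]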
from ..utils import DummyObject, requires_backends


class UpperCAmelCase(metaclass=DummyObject):
    """simple docstring"""

    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
15
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig SCREAMING_SNAKE_CASE :str = logging.get_logger(__name__) # General docstring SCREAMING_SNAKE_CASE :str = 'RegNetConfig' # Base docstring SCREAMING_SNAKE_CASE :List[str] = 'facebook/regnet-y-040' SCREAMING_SNAKE_CASE :Union[str, Any] = [1, 1088, 7, 7] # Image classification docstring SCREAMING_SNAKE_CASE :Optional[int] = 'facebook/regnet-y-040' SCREAMING_SNAKE_CASE :Any = 'tabby, tabby cat' SCREAMING_SNAKE_CASE :Optional[int] = [ 'facebook/regnet-y-040', # See all regnet models at https://huggingface.co/models?filter=regnet ] class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Tuple ,A : int ,A : int = 3 ,A : int = 1 ,A : int = 1 ,A : Optional[str] = "relu" ,**A : Dict ,): super().__init__(**A ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb __A = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) __A = tf.keras.layers.ConvaD( filters=A ,kernel_size=A ,strides=A ,padding="VALID" ,groups=A ,use_bias=A ,name="convolution" ,) __A = tf.keras.layers.BatchNormalization(epsilon=1E-5 ,momentum=0.9 ,name="normalization" ) __A = ACTaFN[activation] if activation is not None else tf.identity def UpperCamelCase_ ( self : List[Any] ,A : Any ): __A = self.convolution(self.padding(A ) ) __A = self.normalization(A ) __A = self.activation(A ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Tuple ,A : RegNetConfig ,**A : str ): super().__init__(**A ) __A = config.num_channels __A = TFRegNetConvLayer( out_channels=config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act ,name="embedder" ,) def UpperCamelCase_ ( self : Tuple ,A : Optional[Any] ): __A = shape_list(A )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) __A = tf.transpose(A ,perm=(0, 2, 3, 1) ) __A = self.embedder(A ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Optional[int] ,A : int ,A : int = 2 ,**A : Tuple ): super().__init__(**A ) __A = tf.keras.layers.ConvaD( filters=A ,kernel_size=1 ,strides=A ,use_bias=A ,name="convolution" ) __A = tf.keras.layers.BatchNormalization(epsilon=1E-5 ,momentum=0.9 ,name="normalization" ) def UpperCamelCase_ ( self : Union[str, Any] ,A : tf.Tensor ,A : bool = False ): return self.normalization(self.convolution(A ) ,training=A ) class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Dict ,A : int ,A : int ,**A : str ): super().__init__(**A ) __A = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A ,name="pooler" ) __A = [ tf.keras.layers.ConvaD(filters=A ,kernel_size=1 ,activation="relu" ,name="attention.0" ), tf.keras.layers.ConvaD(filters=A ,kernel_size=1 ,activation="sigmoid" ,name="attention.2" ), ] def UpperCamelCase_ ( self : Dict ,A : List[Any] ): # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] __A = self.pooler(A ) for layer_module in self.attention: __A = layer_module(A ) __A = hidden_state * pooled return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : List[str] ,A : RegNetConfig ,A : int ,A : int ,A : int = 1 ,**A : Optional[int] ): super().__init__(**A ) __A = in_channels != out_channels or stride != 1 __A = max(1 ,out_channels // config.groups_width ) __A = ( TFRegNetShortCut(A ,stride=A ,name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" ,name="shortcut" ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
__A = [ TFRegNetConvLayer(A ,kernel_size=1 ,activation=config.hidden_act ,name="layer.0" ), TFRegNetConvLayer( A ,stride=A ,groups=A ,activation=config.hidden_act ,name="layer.1" ), TFRegNetConvLayer(A ,kernel_size=1 ,activation=A ,name="layer.2" ), ] __A = ACTaFN[config.hidden_act] def UpperCamelCase_ ( self : int ,A : Optional[int] ): __A = hidden_state for layer_module in self.layers: __A = layer_module(A ) __A = self.shortcut(A ) hidden_state += residual __A = self.activation(A ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : List[Any] ,A : RegNetConfig ,A : int ,A : int ,A : int = 1 ,**A : str ): super().__init__(**A ) __A = in_channels != out_channels or stride != 1 __A = max(1 ,out_channels // config.groups_width ) __A = ( TFRegNetShortCut(A ,stride=A ,name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" ,name="shortcut" ) ) __A = [ TFRegNetConvLayer(A ,kernel_size=1 ,activation=config.hidden_act ,name="layer.0" ), TFRegNetConvLayer( A ,stride=A ,groups=A ,activation=config.hidden_act ,name="layer.1" ), TFRegNetSELayer(A ,reduced_channels=int(round(in_channels / 4 ) ) ,name="layer.2" ), TFRegNetConvLayer(A ,kernel_size=1 ,activation=A ,name="layer.3" ), ] __A = ACTaFN[config.hidden_act] def UpperCamelCase_ ( self : Dict ,A : Any ): __A = hidden_state for layer_module in self.layers: __A = layer_module(A ) __A = self.shortcut(A ) hidden_state += residual __A = self.activation(A ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : List[str] ,A : RegNetConfig ,A : int ,A : int ,A : int = 2 ,A : int = 2 ,**A : Optional[int] ): super().__init__(**A ) __A = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer __A = [ # downsampling is done in the first layer with stride of 2 layer(A ,A ,A ,stride=A ,name="layers.0" ), *[layer(A ,A ,A ,name=f'''layers.{i+1}''' ) for i in range(depth - 1 )], ] def UpperCamelCase_ ( self : Any ,A : List[str] ): for layer_module in self.layers: __A = layer_module(A ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Any ,A : RegNetConfig ,**A : List[str] ): super().__init__(**A ) __A = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( A ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,name="stages.0" ,) ) __A = zip(config.hidden_sizes ,config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(A ,config.depths[1:] ) ): self.stages.append(TFRegNetStage(A ,A ,A ,depth=A ,name=f'''stages.{i+1}''' ) ) def UpperCamelCase_ ( self : List[str] ,A : tf.Tensor ,A : bool = False ,A : bool = True ): __A = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: __A = hidden_states + (hidden_state,) __A = stage_module(A ) if output_hidden_states: __A = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=A ,hidden_states=A ) @keras_serializable class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' snake_case_ = RegNetConfig def __init__( self : int ,A : Optional[int] ,**A : Dict ): super().__init__(**A ) __A = config __A = TFRegNetEmbeddings(A ,name="embedder" ) __A 
= TFRegNetEncoder(A ,name="encoder" ) __A = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A ,name="pooler" ) @unpack_inputs def UpperCamelCase_ ( self : Tuple ,A : tf.Tensor ,A : Optional[bool] = None ,A : Optional[bool] = None ,A : bool = False ,): __A = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __A = return_dict if return_dict is not None else self.config.use_return_dict __A = self.embedder(A ,training=A ) __A = self.encoder( A ,output_hidden_states=A ,return_dict=A ,training=A ) __A = encoder_outputs[0] __A = self.pooler(A ) # Change to NCHW output format have uniformity in the modules __A = tf.transpose(A ,perm=(0, 3, 1, 2) ) __A = tf.transpose(A ,perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: __A = tuple([tf.transpose(A ,perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=A ,pooler_output=A ,hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states ,) class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = RegNetConfig snake_case_ = "regnet" snake_case_ = "pixel_values" @property def UpperCamelCase_ ( self : Optional[Any] ): return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) ,dtype=tf.floataa )} SCREAMING_SNAKE_CASE :Dict = R'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n' SCREAMING_SNAKE_CASE :Dict = R'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." 
, __SCREAMING_SNAKE_CASE , ) class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : List[Any] ,A : RegNetConfig ,*A : List[Any] ,**A : str ): super().__init__(A ,*A ,**A ) __A = TFRegNetMainLayer(A ,name="regnet" ) @unpack_inputs @add_start_docstrings_to_model_forward(A ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC ,output_type=A ,config_class=_CONFIG_FOR_DOC ,modality="vision" ,expected_output=_EXPECTED_OUTPUT_SHAPE ,) def UpperCamelCase_ ( self : Tuple ,A : tf.Tensor ,A : Optional[bool] = None ,A : Optional[bool] = None ,A : int=False ,): __A = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __A = return_dict if return_dict is not None else self.config.use_return_dict __A = self.regnet( pixel_values=A ,output_hidden_states=A ,return_dict=A ,training=A ,) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state ,pooler_output=outputs.pooler_output ,hidden_states=outputs.hidden_states ,) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , __SCREAMING_SNAKE_CASE , ) class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : Optional[int] ,A : RegNetConfig ,*A : str ,**A : Tuple ): super().__init__(A ,*A ,**A ) __A = config.num_labels __A = TFRegNetMainLayer(A ,name="regnet" ) # classification head __A = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels ,name="classifier.1" ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(A ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=A ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,) def UpperCamelCase_ ( self : List[str] ,A : tf.Tensor = None ,A : tf.Tensor = None ,A : bool = None ,A : bool = None ,A : Union[str, Any]=False ,): __A = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __A = return_dict if return_dict is not None else self.config.use_return_dict __A = self.regnet( A ,output_hidden_states=A ,return_dict=A ,training=A ) __A = outputs.pooler_output if return_dict else outputs[1] __A = self.classifier[0](A ) __A = self.classifier[1](A ) __A = None if labels is None else self.hf_compute_loss(labels=A ,logits=A ) if not return_dict: __A = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=A ,logits=A ,hidden_states=outputs.hidden_states )
15
1
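The dummy class above stands in for an object whose backends aren't installed, so imports succeed and the failure surfaces only when the object is actually used. A stripped-down sketch of that pattern, simplified relative to the real DummyObject/requires_backends machinery (the class name here is an invented placeholder):

import importlib.util


def requires_backends(obj, backends):
    name = obj.__name__ if hasattr(obj, "__name__") else type(obj).__name__
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires the following backends: {', '.join(missing)}")


class OptionalFeature:  # illustrative placeholder class
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, self._backends)

Instantiating OptionalFeature raises a clear ImportError naming the missing packages instead of an opaque ModuleNotFoundError at import time.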
'''simple docstring'''

import pytest

from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested

from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows


def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input_splitting():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
243
'''simple docstring'''

import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
    webbrowser.open(link)
243
1
"""simple docstring""" import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class __snake_case : def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=False , lowercase=True , lowercase=99 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=5_12 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=3 , lowercase=4 , lowercase=None , ) -> Any: '''simple docstring''' a__: Optional[Any] = parent a__: int = batch_size a__: Optional[Any] = seq_length a__: int = is_training a__: Optional[int] = use_input_mask a__: str = use_token_type_ids a__: int = use_labels a__: Any = vocab_size a__: Optional[Any] = hidden_size a__: str = num_hidden_layers a__: str = num_attention_heads a__: List[Any] = intermediate_size a__: List[str] = hidden_act a__: Dict = hidden_dropout_prob a__: str = attention_probs_dropout_prob a__: Any = max_position_embeddings a__: Dict = type_vocab_size a__: Any = type_sequence_label_size a__: List[Any] = initializer_range a__: Union[str, Any] = num_labels a__: Tuple = num_choices a__: Any = scope def lowerCamelCase_ ( self) -> List[Any]: '''simple docstring''' a__: Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) a__: Dict = None if self.use_input_mask: a__: str = random_attention_mask([self.batch_size, self.seq_length]) a__: Dict = None if self.use_token_type_ids: a__: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) a__: Optional[int] = None a__: Any = None a__: Optional[Any] = None if self.use_labels: a__: Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size) a__: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) a__: Optional[Any] = ids_tensor([self.batch_size] , self.num_choices) a__: Union[str, Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase_ ( self) -> int: '''simple docstring''' return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , ) def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> int: '''simple docstring''' a__: Any = BioGptModel(config=lowerCamelCase__) model.to(lowerCamelCase__) model.eval() a__: List[Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__) a__: Union[str, Any] = 
model(lowerCamelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> str: '''simple docstring''' a__: str = BioGptForCausalLM(config=lowerCamelCase__) model.to(lowerCamelCase__) model.eval() a__: int = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase) -> str: '''simple docstring''' a__: Tuple = BioGptModel(config=lowerCamelCase__) model.to(lowerCamelCase__) model.eval() # create attention mask a__: List[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCamelCase__) a__: Dict = self.seq_length // 2 a__: Dict = 0 # first forward pass a__ , a__: Dict = model(lowerCamelCase__ , attention_mask=lowerCamelCase__).to_tuple() # create hypothetical next token and extent to next_input_ids a__: Dict = ids_tensor((self.batch_size, 1) , config.vocab_size) # change a random masked slice from input_ids a__: Union[str, Any] = ids_tensor((1,) , lowerCamelCase__).item() + 1 a__: Optional[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size).squeeze(-1) a__: str = random_other_next_tokens # append to next input_ids and attn_mask a__: int = torch.cat([input_ids, next_tokens] , dim=-1) a__: Optional[int] = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowerCamelCase__)] , dim=1 , ) # get two different outputs a__: Optional[Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__)['last_hidden_state'] a__: Optional[Any] = model(lowerCamelCase__ , past_key_values=lowerCamelCase__ , attention_mask=lowerCamelCase__)['last_hidden_state'] # select random slice a__: List[Any] = ids_tensor((1,) , output_from_past.shape[-1]).item() a__: List[str] = output_from_no_past[:, -1, random_slice_idx].detach() a__: Tuple = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3)) def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase) -> List[Any]: '''simple docstring''' a__: Any = BioGptModel(config=lowerCamelCase__).to(lowerCamelCase__).eval() a__: Optional[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCamelCase__) # first forward pass a__: int = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , use_cache=lowerCamelCase__) a__ , a__: List[str] = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids a__: List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size) a__: Any = ids_tensor((self.batch_size, 3) , 2) # append to next input_ids and a__: str = torch.cat([input_ids, next_tokens] , dim=-1) a__: Dict = torch.cat([attention_mask, next_attn_mask] , dim=-1) a__: int = model(lowerCamelCase__ , attention_mask=lowerCamelCase__)['last_hidden_state'] a__: Tuple = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__)[ 'last_hidden_state' ] # select random slice a__: List[str] = ids_tensor((1,) , output_from_past.shape[-1]).item() a__: Tuple = output_from_no_past[:, -3:, random_slice_idx].detach() a__: str = output_from_past[:, 
:, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3)) def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase , lowercase=False) -> Union[str, Any]: '''simple docstring''' a__: int = BioGptForCausalLM(lowerCamelCase__) model.to(lowerCamelCase__) if gradient_checkpointing: model.gradient_checkpointing_enable() a__: str = model(lowerCamelCase__ , labels=lowerCamelCase__) self.parent.assertEqual(result.loss.shape , ()) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) result.loss.backward() def lowerCamelCase_ ( self , lowercase , *lowercase) -> List[Any]: '''simple docstring''' a__: Any = BioGptModel(lowerCamelCase__) a__: List[str] = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std) , 0.001) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0) , 0.01) def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase) -> Tuple: '''simple docstring''' a__: Union[str, Any] = self.num_labels a__: Optional[Any] = BioGptForTokenClassification(lowerCamelCase__) model.to(lowerCamelCase__) model.eval() a__: int = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def lowerCamelCase_ ( self) -> str: '''simple docstring''' a__: Tuple = self.prepare_config_and_inputs() ( ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ): Any = config_and_inputs a__: Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): a__ = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) a__ = (BioGptForCausalLM,) if is_torch_available() else () a__ = ( { """feature-extraction""": BioGptModel, """text-classification""": BioGptForSequenceClassification, """text-generation""": BioGptForCausalLM, """token-classification""": BioGptForTokenClassification, """zero-shot""": BioGptForSequenceClassification, } if is_torch_available() else {} ) a__ = False def lowerCamelCase_ ( self) -> Optional[int]: '''simple docstring''' a__: int = BioGptModelTester(self) a__: Tuple = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37) def lowerCamelCase_ ( self) -> Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase_ ( self) -> Any: '''simple docstring''' a__: Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__) def lowerCamelCase_ ( self) -> Any: '''simple docstring''' a__: List[Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: a__: str = type self.model_tester.create_and_check_model(*lowerCamelCase__) def lowerCamelCase_ ( self) -> Optional[Any]: '''simple docstring''' a__: Optional[Any] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowerCamelCase__) def lowerCamelCase_ ( self) -> Tuple: '''simple docstring''' a__: Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*lowerCamelCase__ , gradient_checkpointing=lowerCamelCase__) def lowerCamelCase_ ( self) -> str: '''simple docstring''' a__: Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowerCamelCase__) def lowerCamelCase_ ( self) -> str: '''simple docstring''' a__: Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*lowerCamelCase__) def lowerCamelCase_ ( self) -> Optional[int]: '''simple docstring''' a__: Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*lowerCamelCase__) @slow def lowerCamelCase_ ( self) -> Tuple: '''simple docstring''' a__: Any = BioGptForCausalLM.from_pretrained('microsoft/biogpt') model.to(lowerCamelCase__) a__: Tuple = BioGptTokenizer.from_pretrained('microsoft/biogpt') a__: List[Any] = 'left' # Define PAD Token = EOS Token = 50256 a__: str = tokenizer.eos_token a__: str = model.config.eos_token_id # use different length sentences to test batching a__: Optional[int] = [ 'Hello, my dog is a little', 'Today, I', ] a__: Tuple = tokenizer(lowerCamelCase__ , return_tensors='pt' , padding=lowerCamelCase__) a__: int = inputs['input_ids'].to(lowerCamelCase__) a__: Tuple = model.generate( input_ids=lowerCamelCase__ , attention_mask=inputs['attention_mask'].to(lowerCamelCase__) , ) a__: Any = tokenizer(sentences[0] , return_tensors='pt').input_ids.to(lowerCamelCase__) a__: str = model.generate(input_ids=lowerCamelCase__) a__: Optional[int] = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item() a__: Tuple = tokenizer(sentences[1] , return_tensors='pt').input_ids.to(lowerCamelCase__) a__: List[str] = model.generate(input_ids=lowerCamelCase__ , max_length=model.config.max_length - num_paddings) a__: List[Any] = tokenizer.batch_decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__) a__: Optional[int] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCamelCase__) a__: Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCamelCase__) a__: str = [ 'Hello, my dog is a little bit bigger than a little bit.', 'Today, I have a good idea of how to use the information', ] self.assertListEqual(lowerCamelCase__ , lowerCamelCase__) self.assertListEqual(lowerCamelCase__ , [non_padded_sentence, padded_sentence]) @slow def lowerCamelCase_ ( self) -> Union[str, Any]: '''simple docstring''' for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__: List[Any] = BioGptModel.from_pretrained(lowerCamelCase__) self.assertIsNotNone(lowerCamelCase__) def lowerCamelCase_ ( self) -> Union[str, Any]: '''simple docstring''' a__ , a__: int = self.model_tester.prepare_config_and_inputs_for_common() a__: Optional[int] = 3 a__: Any = input_dict['input_ids'] a__: Any = input_ids.ne(1).to(lowerCamelCase__) a__: str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size) a__: List[Any] = BioGptForSequenceClassification(lowerCamelCase__) model.to(lowerCamelCase__) model.eval() a__: Union[str, Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , labels=lowerCamelCase__) self.assertEqual(result.logits.shape , 
(self.model_tester.batch_size, self.model_tester.num_labels)) def lowerCamelCase_ ( self) -> Any: '''simple docstring''' a__ , a__: str = self.model_tester.prepare_config_and_inputs_for_common() a__: Any = 3 a__: str = 'multi_label_classification' a__: Optional[int] = input_dict['input_ids'] a__: Any = input_ids.ne(1).to(lowerCamelCase__) a__: Tuple = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float) a__: Union[str, Any] = BioGptForSequenceClassification(lowerCamelCase__) model.to(lowerCamelCase__) model.eval() a__: Optional[Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , labels=lowerCamelCase__) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) @require_torch class __snake_case ( unittest.TestCase ): @slow def lowerCamelCase_ ( self) -> str: '''simple docstring''' a__: Tuple = BioGptForCausalLM.from_pretrained('microsoft/biogpt') a__: List[Any] = torch.tensor([[2, 48_05, 9, 6_56, 21]]) a__: str = model(lowerCamelCase__)[0] a__: str = 4_23_84 a__: Optional[Any] = torch.Size((1, 5, vocab_size)) self.assertEqual(output.shape , lowerCamelCase__) a__: Optional[Any] = torch.tensor( [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1e-4)) @slow def lowerCamelCase_ ( self) -> Optional[int]: '''simple docstring''' a__: Tuple = BioGptTokenizer.from_pretrained('microsoft/biogpt') a__: Tuple = BioGptForCausalLM.from_pretrained('microsoft/biogpt') model.to(lowerCamelCase__) torch.manual_seed(0) a__: str = tokenizer('COVID-19 is' , return_tensors='pt').to(lowerCamelCase__) a__: Union[str, Any] = model.generate( **lowerCamelCase__ , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=lowerCamelCase__ , ) a__: Tuple = tokenizer.decode(output_ids[0] , skip_special_tokens=lowerCamelCase__) a__: Dict = ( 'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the' ' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and' ' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),' ' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and' ' more than 800,000 deaths.' ) self.assertEqual(lowerCamelCase__ , lowerCamelCase__)
290
import string

# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
    "E": 12.70, "T": 9.06, "A": 8.17, "O": 7.51, "I": 6.97, "N": 6.75,
    "S": 6.33, "H": 6.09, "R": 5.99, "D": 4.25, "L": 4.03, "C": 2.78,
    "U": 2.76, "M": 2.41, "W": 2.36, "F": 2.23, "G": 2.02, "Y": 1.97,
    "P": 1.93, "B": 1.29, "V": 0.98, "K": 0.77, "J": 0.15, "X": 0.15,
    "Q": 0.10, "Z": 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def get_letter_count(message: str) -> dict[str, int]:
    """Count occurrences of each uppercase letter in the message."""
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    """Return the letters of the message ordered from most to least frequent."""
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {freq: [] for letter, freq in letter_to_freq.items()}
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    """Score (0-12) how closely the message's letter frequencies match English."""
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score


if __name__ == "__main__":
    import doctest

    doctest.testmod()
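# Usage sketch for the module above (the sentence is illustrative only): plain
# English prose should score near the maximum of 12 on english_freq_match_score.
sample = "Hello, this is a sample message written in ordinary English."
print(get_frequency_order(sample))
print(english_freq_match_score(sample))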
90
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _a : Tuple = logging.get_logger(__name__) _a : Any = { 'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json', 'distilbert-base-uncased-distilled-squad': ( 'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json' ), 'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json', 'distilbert-base-cased-distilled-squad': ( 'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json' ), 'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json', 'distilbert-base-multilingual-cased': ( 'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json' ), 'distilbert-base-uncased-finetuned-sst-2-english': ( 'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json' ), } class __A ( __lowerCAmelCase ): _UpperCamelCase : int = '''distilbert''' _UpperCamelCase : Optional[Any] = { '''hidden_size''': '''dim''', '''num_attention_heads''': '''n_heads''', '''num_hidden_layers''': '''n_layers''', } def __init__( self , a__=30522 , a__=512 , a__=False , a__=6 , a__=12 , a__=768 , a__=4 * 768 , a__=0.1 , a__=0.1 , a__="gelu" , a__=0.0_2 , a__=0.1 , a__=0.2 , a__=0 , **a__ , ): _lowerCAmelCase : Union[str, Any] = vocab_size _lowerCAmelCase : int = max_position_embeddings _lowerCAmelCase : List[str] = sinusoidal_pos_embds _lowerCAmelCase : Optional[Any] = n_layers _lowerCAmelCase : List[str] = n_heads _lowerCAmelCase : List[str] = dim _lowerCAmelCase : List[str] = hidden_dim _lowerCAmelCase : Union[str, Any] = dropout _lowerCAmelCase : Optional[Any] = attention_dropout _lowerCAmelCase : int = activation _lowerCAmelCase : str = initializer_range _lowerCAmelCase : List[Any] = qa_dropout _lowerCAmelCase : List[str] = seq_classif_dropout super().__init__(**lowerCAmelCase_ , pad_token_id=lowerCAmelCase_ ) class __A ( __lowerCAmelCase ): @property def __A ( self ): if self.task == "multiple-choice": _lowerCAmelCase : Optional[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""} else: _lowerCAmelCase : List[Any] = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
365
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _a : List[Any] = logging.get_logger(__name__) class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : Tuple = "maskformer-swin" _UpperCamelCase : Union[str, Any] = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self , a__=224 , a__=4 , a__=3 , a__=96 , a__=[2, 2, 6, 2] , a__=[3, 6, 12, 24] , a__=7 , a__=4.0 , a__=True , a__=0.0 , a__=0.0 , a__=0.1 , a__="gelu" , a__=False , a__=0.0_2 , a__=1e-5 , a__=None , a__=None , **a__ , ): super().__init__(**a__ ) _lowerCAmelCase : Dict = image_size _lowerCAmelCase : List[str] = patch_size _lowerCAmelCase : Any = num_channels _lowerCAmelCase : int = embed_dim _lowerCAmelCase : Optional[Any] = depths _lowerCAmelCase : List[str] = len(a__ ) _lowerCAmelCase : List[Any] = num_heads _lowerCAmelCase : Tuple = window_size _lowerCAmelCase : List[Any] = mlp_ratio _lowerCAmelCase : Optional[Any] = qkv_bias _lowerCAmelCase : int = hidden_dropout_prob _lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob _lowerCAmelCase : Any = drop_path_rate _lowerCAmelCase : Optional[Any] = hidden_act _lowerCAmelCase : Tuple = use_absolute_embeddings _lowerCAmelCase : str = layer_norm_eps _lowerCAmelCase : Any = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _lowerCAmelCase : Union[str, Any] = int(embed_dim * 2 ** (len(a__ ) - 1) ) _lowerCAmelCase : int = ["""stem"""] + [F"stage{idx}" for idx in range(1 , len(a__ ) + 1 )] _lowerCAmelCase , _lowerCAmelCase : int = get_aligned_output_features_output_indices( out_features=a__ , out_indices=a__ , stage_names=self.stage_names )
126
0
"""simple docstring""" import logging import os from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional from tqdm import auto as tqdm_lib SCREAMING_SNAKE_CASE__ = { "debug": logging.DEBUG, "info": logging.INFO, "warning": logging.WARNING, "error": logging.ERROR, "critical": logging.CRITICAL, } SCREAMING_SNAKE_CASE__ = logging.WARNING def UpperCAmelCase__ ( ): '''simple docstring''' lowerCAmelCase = os.getenv("""DATASETS_VERBOSITY""" , SCREAMING_SNAKE_CASE ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( F'Unknown option DATASETS_VERBOSITY={env_level_str}, ' F'has to be one of: { ", ".join(log_levels.keys() ) }' ) return _default_log_level def UpperCAmelCase__ ( ): '''simple docstring''' return __name__.split(""".""" )[0] def UpperCAmelCase__ ( ): '''simple docstring''' return logging.getLogger(_get_library_name() ) def UpperCAmelCase__ ( ): '''simple docstring''' lowerCAmelCase = _get_library_root_logger() library_root_logger.setLevel(_get_default_logging_level() ) def UpperCAmelCase__ ( ): '''simple docstring''' lowerCAmelCase = _get_library_root_logger() library_root_logger.setLevel(logging.NOTSET ) def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Optional[str] = None ): '''simple docstring''' if name is None: lowerCAmelCase = _get_library_name() return logging.getLogger(SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( ): '''simple docstring''' return _get_library_root_logger().getEffectiveLevel() def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _get_library_root_logger().setLevel(SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( ): '''simple docstring''' return set_verbosity(SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( ): '''simple docstring''' return set_verbosity(SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( ): '''simple docstring''' return set_verbosity(SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( ): '''simple docstring''' return set_verbosity(SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( ): '''simple docstring''' lowerCAmelCase = False def UpperCAmelCase__ ( ): '''simple docstring''' lowerCAmelCase = True # Configure the library root logger at the module level (singleton-like) _configure_library_root_logger() class lowercase : def __init__( self , *lowercase , **lowercase ) -> List[Any]: # pylint: disable=unused-argument lowerCAmelCase = args[0] if args else None def __iter__( self ) -> Dict: return iter(self._iterator ) def __getattr__( self , lowercase ) -> int: def empty_fn(*lowercase , **lowercase ): # pylint: disable=unused-argument return return empty_fn def __enter__( self ) -> Tuple: return self def __exit__( self , lowercase , lowercase , lowercase ) -> List[str]: return SCREAMING_SNAKE_CASE__ = True class lowercase : def __call__( self , *lowercase , lowercase=False , **lowercase ) -> Union[str, Any]: if _tqdm_active and not disable: return tqdm_lib.tqdm(*lowercase , **lowercase ) else: return EmptyTqdm(*lowercase , **lowercase ) def _snake_case ( self , *lowercase , **lowercase ) -> Tuple: lowerCAmelCase = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*lowercase , **lowercase ) def _snake_case ( self ) -> List[Any]: if _tqdm_active: return tqdm_lib.tqdm.get_lock() SCREAMING_SNAKE_CASE__ = _tqdm_cls() def UpperCAmelCase__ ( ): '''simple docstring''' global _tqdm_active return bool(_tqdm_active ) def UpperCAmelCase__ ( ): '''simple docstring''' global _tqdm_active 
lowerCAmelCase = True def UpperCAmelCase__ ( ): '''simple docstring''' global _tqdm_active lowerCAmelCase = False
46
from collections.abc import Callable

import numpy as np


def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve an ODE dy/dx = ode_func(x, y) with the forward (explicit) Euler method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
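# Usage sketch: for dy/dx = y with y(0) = 1, forward Euler with step 0.01
# approximates e at x = 1 from below (roughly 2.70 versus 2.71828...).
approx = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
print(approx[-1])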
76
0
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ClapProcessor(ProcessorMixin):
    """Wraps a CLAP feature extractor and a Roberta tokenizer into a single processor."""

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
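# Hedged usage sketch: "laion/clap-htsat-unfused" is a public checkpoint that
# ships this processor; the random waveform merely stands in for real 48 kHz audio.
import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
waveform = np.random.randn(48_000).astype("float32")
inputs = processor(text=["a dog barking"], audios=[waveform], sampling_rate=48_000, return_tensors="pt")
print(sorted(inputs.keys()))  # input_ids, attention_mask and input_features are expected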
347
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
347
1
"""simple docstring""" import unittest from accelerate import debug_launcher from accelerate.test_utils import require_cpu, test_ops, test_script @require_cpu class UpperCamelCase_ ( unittest.TestCase): """simple docstring""" def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]: debug_launcher(test_script.main ) def UpperCAmelCase_ ( self : List[Any] ) -> Any: debug_launcher(test_ops.main )
54
"""simple docstring""" from __future__ import annotations import pandas as pd def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [0] * no_of_processes __SCREAMING_SNAKE_CASE = [0] * no_of_processes # Copy the burst time into remaining_time[] for i in range(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = burst_time[i] __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 9_9999_9999 __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = False # Process until all processes are completed while complete != no_of_processes: for j in range(lowerCAmelCase_ ): if arrival_time[j] <= increment_time and remaining_time[j] > 0: if remaining_time[j] < minm: __SCREAMING_SNAKE_CASE = remaining_time[j] __SCREAMING_SNAKE_CASE = j __SCREAMING_SNAKE_CASE = True if not check: increment_time += 1 continue remaining_time[short] -= 1 __SCREAMING_SNAKE_CASE = remaining_time[short] if minm == 0: __SCREAMING_SNAKE_CASE = 9_9999_9999 if remaining_time[short] == 0: complete += 1 __SCREAMING_SNAKE_CASE = False # Find finish time of current process __SCREAMING_SNAKE_CASE = increment_time + 1 # Calculate waiting time __SCREAMING_SNAKE_CASE = finish_time - arrival_time[short] __SCREAMING_SNAKE_CASE = finar - burst_time[short] if waiting_time[short] < 0: __SCREAMING_SNAKE_CASE = 0 # Increment time increment_time += 1 return waiting_time def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [0] * no_of_processes for i in range(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = burst_time[i] + waiting_time[i] return turn_around_time def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 for i in range(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = total_waiting_time + waiting_time[i] __SCREAMING_SNAKE_CASE = total_turn_around_time + turn_around_time[i] print(f"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""" ) print("Average turn around time =" , total_turn_around_time / no_of_processes ) if __name__ == "__main__": print('''Enter how many process you want to analyze''') a__ : Optional[Any] = int(input()) a__ : Optional[int] = [0] * no_of_processes a__ : int = [0] * no_of_processes a__ : List[Any] = list(range(1, no_of_processes + 1)) for i in range(no_of_processes): print('''Enter the arrival time and burst time for process:--''' + str(i + 1)) a__ , a__ : Tuple = map(int, input().split()) a__ : int = calculate_waitingtime(arrival_time, burst_time, no_of_processes) a__ : Dict = burst_time a__ : Any = no_of_processes a__ : Optional[int] = waiting_time a__ : Union[str, Any] = calculate_turnaroundtime(bt, n, wt) calculate_average_times(waiting_time, turn_around_time, no_of_processes) a__ : str = pd.DataFrame( list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)), columns=[ '''Process''', '''BurstTime''', '''ArrivalTime''', '''WaitingTime''', '''TurnAroundTime''', ], ) # Printing the dataFrame pd.set_option('''display.max_rows''', fcfs.shape[0] + 1) print(fcfs)
54
1
import itertools import random import unittest import numpy as np from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor from transformers.testing_utils import require_torch, slow from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin UpperCAmelCase__ = random.Random() def A ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple=1.0 , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : int=None ) -> Optional[int]: '''simple docstring''' if rng is None: _UpperCAmelCase = global_rng _UpperCAmelCase = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class __lowerCAmelCase ( unittest.TestCase ): def __init__( self : Union[str, Any] , A : Optional[Any] , A : Any=7 , A : str=4_00 , A : List[Any]=20_00 , A : List[Any]=1 , A : Any=0.0 , A : Tuple=1_60_00 , A : Optional[int]=True , A : str=True , ) -> Dict: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = min_seq_length _UpperCAmelCase = max_seq_length _UpperCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) _UpperCAmelCase = feature_size _UpperCAmelCase = padding_value _UpperCAmelCase = sampling_rate _UpperCAmelCase = return_attention_mask _UpperCAmelCase = do_normalize def _lowerCamelCase ( self : int) -> List[Any]: """simple docstring""" return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def _lowerCamelCase ( self : str , A : Optional[Any]=False , A : Any=False) -> Optional[Any]: """simple docstring""" def _flatten(A : Tuple): return list(itertools.chain(*A)) if equal_length: _UpperCAmelCase = floats_list((self.batch_size, self.max_seq_length)) else: # make sure that inputs increase in size _UpperCAmelCase = [ _flatten(floats_list((x, self.feature_size))) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff) ] if numpify: _UpperCAmelCase = [np.asarray(A) for x in speech_inputs] return speech_inputs class __lowerCAmelCase ( A , unittest.TestCase ): UpperCamelCase = WavaVecaFeatureExtractor def _lowerCamelCase ( self : List[str]) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = WavaVecaFeatureExtractionTester(self) def _lowerCamelCase ( self : str , A : str) -> Optional[int]: """simple docstring""" self.assertTrue(np.all(np.mean(A , axis=0) < 1E-3)) self.assertTrue(np.all(np.abs(np.var(A , axis=0) - 1) < 1E-3)) def _lowerCamelCase ( self : List[Any]) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) # create three inputs of length 800, 1000, and 1200 _UpperCAmelCase = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)] _UpperCAmelCase = [np.asarray(A) for speech_input in speech_inputs] # Test not batched input _UpperCAmelCase = feat_extract(speech_inputs[0] , return_tensors='np').input_values _UpperCAmelCase = feat_extract(np_speech_inputs[0] , return_tensors='np').input_values self.assertTrue(np.allclose(A , A , atol=1E-3)) # Test batched _UpperCAmelCase = feat_extract(A , return_tensors='np').input_values _UpperCAmelCase = feat_extract(A , return_tensors='np').input_values for enc_seq_a, enc_seq_a in zip(A , A): self.assertTrue(np.allclose(A , A , atol=1E-3)) # Test 2-D numpy arrays are 
batched. _UpperCAmelCase = [floats_list((1, x))[0] for x in (8_00, 8_00, 8_00)] _UpperCAmelCase = np.asarray(A) _UpperCAmelCase = feat_extract(A , return_tensors='np').input_values _UpperCAmelCase = feat_extract(A , return_tensors='np').input_values for enc_seq_a, enc_seq_a in zip(A , A): self.assertTrue(np.allclose(A , A , atol=1E-3)) def _lowerCamelCase ( self : str) -> List[str]: """simple docstring""" _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) _UpperCAmelCase = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)] _UpperCAmelCase = ['longest', 'max_length', 'do_not_pad'] _UpperCAmelCase = [None, 16_00, None] for max_length, padding in zip(A , A): _UpperCAmelCase = feat_extract(A , padding=A , max_length=A , return_tensors='np') _UpperCAmelCase = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:8_00]) self.assertTrue(input_values[0][8_00:].sum() < 1E-6) self._check_zero_mean_unit_variance(input_values[1][:10_00]) self.assertTrue(input_values[0][10_00:].sum() < 1E-6) self._check_zero_mean_unit_variance(input_values[2][:12_00]) def _lowerCamelCase ( self : Dict) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) _UpperCAmelCase = range(8_00 , 14_00 , 2_00) _UpperCAmelCase = [floats_list((1, x))[0] for x in lengths] _UpperCAmelCase = ['longest', 'max_length', 'do_not_pad'] _UpperCAmelCase = [None, 16_00, None] for max_length, padding in zip(A , A): _UpperCAmelCase = feat_extract(A , max_length=A , padding=A) _UpperCAmelCase = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:8_00]) self._check_zero_mean_unit_variance(input_values[1][:10_00]) self._check_zero_mean_unit_variance(input_values[2][:12_00]) def _lowerCamelCase ( self : Optional[Any]) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) _UpperCAmelCase = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)] _UpperCAmelCase = feat_extract( A , truncation=A , max_length=10_00 , padding='max_length' , return_tensors='np') _UpperCAmelCase = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :8_00]) self._check_zero_mean_unit_variance(input_values[1]) self._check_zero_mean_unit_variance(input_values[2]) def _lowerCamelCase ( self : str) -> List[Any]: """simple docstring""" _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) _UpperCAmelCase = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)] _UpperCAmelCase = feat_extract( A , truncation=A , max_length=10_00 , padding='longest' , return_tensors='np') _UpperCAmelCase = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :8_00]) self._check_zero_mean_unit_variance(input_values[1, :10_00]) self._check_zero_mean_unit_variance(input_values[2]) # make sure that if max_length < longest -> then pad to max_length self.assertTrue(input_values.shape == (3, 10_00)) _UpperCAmelCase = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)] _UpperCAmelCase = feat_extract( A , truncation=A , max_length=20_00 , padding='longest' , return_tensors='np') _UpperCAmelCase = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :8_00]) self._check_zero_mean_unit_variance(input_values[1, :10_00]) self._check_zero_mean_unit_variance(input_values[2]) # make sure that if 
max_length > longest -> then pad to longest self.assertTrue(input_values.shape == (3, 12_00)) @require_torch def _lowerCamelCase ( self : Tuple) -> int: """simple docstring""" import torch _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) _UpperCAmelCase = np.random.rand(1_00).astype(np.floataa) _UpperCAmelCase = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: _UpperCAmelCase = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np') self.assertTrue(np_processed.input_values.dtype == np.floataa) _UpperCAmelCase = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt') self.assertTrue(pt_processed.input_values.dtype == torch.floataa) @slow @require_torch def _lowerCamelCase ( self : Optional[int]) -> Tuple: """simple docstring""" for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST: _UpperCAmelCase = WavaVecaConfig.from_pretrained(A) _UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(A) # only "layer" feature extraction norm should make use of # attention_mask self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == 'layer')
356
def catalan_numbers(upper_limit: int) -> "list[int]":
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
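# Usage sketch: the first six Catalan numbers.
print(catalan_numbers(5))  # [1, 1, 2, 5, 14, 42]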
290
0
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
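# Sanity sketch: the introsort entry point should agree with Python's built-in
# sorted() on an arbitrary list (sorting happens in place and returns the list).
data = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12]
assert sort(list(data)) == sorted(data)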
196
import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging __lowerCAmelCase = logging.get_logger(__name__) def snake_case_ ( snake_case , snake_case ) -> List[str]: lowercase__: List[str] = set() lowercase__: List[Any] = [] def parse_line(snake_case ): for line in fp: if isinstance(snake_case , snake_case ): lowercase__: Optional[Any] = line.decode('UTF-8' ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(' ' ): # process a single warning and move it to `selected_warnings`. if len(snake_case ) > 0: lowercase__: List[str] = '\n'.join(snake_case ) # Only keep the warnings specified in `targets` if any(f': {x}: ' in warning for x in targets ): selected_warnings.add(snake_case ) buffer.clear() continue else: lowercase__: Union[str, Any] = line.strip() buffer.append(snake_case ) if from_gh: for filename in os.listdir(snake_case ): lowercase__: Dict = os.path.join(snake_case , snake_case ) if not os.path.isdir(snake_case ): # read the file if filename != "warnings.txt": continue with open(snake_case ) as fp: parse_line(snake_case ) else: try: with zipfile.ZipFile(snake_case ) as z: for filename in z.namelist(): if not os.path.isdir(snake_case ): # read the file if filename != "warnings.txt": continue with z.open(snake_case ) as fp: parse_line(snake_case ) except Exception: logger.warning( f'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.' ) return selected_warnings def snake_case_ ( snake_case , snake_case ) -> Any: lowercase__: Optional[Any] = set() lowercase__: int = [os.path.join(snake_case , snake_case ) for p in os.listdir(snake_case ) if (p.endswith('.zip' ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(snake_case , snake_case ) ) return selected_warnings if __name__ == "__main__": def snake_case_ ( snake_case ) -> str: return values.split(',' ) __lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''') parser.add_argument( '''--output_dir''', type=str, required=True, help='''Where to store the downloaded artifacts and other result files.''', ) parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''') # optional parameters parser.add_argument( '''--targets''', default='''DeprecationWarning,UserWarning,FutureWarning''', type=list_str, help='''Comma-separated list of target warning(s) which we want to extract.''', ) parser.add_argument( '''--from_gh''', action='''store_true''', help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''', ) __lowerCAmelCase = parser.parse_args() __lowerCAmelCase = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links __lowerCAmelCase = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print('''=''' * 80) download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub 
time.sleep(1) # extract warnings from artifacts __lowerCAmelCase = extract_warnings(args.output_dir, args.targets) __lowerCAmelCase = sorted(selected_warnings) with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
196
1
"""simple docstring""" import math import numpy as np import qiskit from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute def __A ( a_ :int = 3) -> qiskit.result.counts.Counts: if isinstance(a_ , a_): raise TypeError('''number of qubits must be a integer.''') if number_of_qubits <= 0: raise ValueError('''number of qubits must be > 0.''') if math.floor(a_) != number_of_qubits: raise ValueError('''number of qubits must be exact integer.''') if number_of_qubits > 10: raise ValueError('''number of qubits too large to simulate(>10).''') __a : Optional[int] = QuantumRegister(a_ , '''qr''') __a : int = ClassicalRegister(a_ , '''cr''') __a : Optional[Any] = QuantumCircuit(a_ , a_) __a : Dict = number_of_qubits for i in range(a_): quantum_circuit.h(number_of_qubits - i - 1) counter -= 1 for j in range(a_): quantum_circuit.cp(np.pi / 2 ** (counter - j) , a_ , a_) for k in range(number_of_qubits // 2): quantum_circuit.swap(a_ , number_of_qubits - k - 1) # measure all the qubits quantum_circuit.measure(a_ , a_) # simulate with 10000 shots __a : List[str] = Aer.get_backend('''qasm_simulator''') __a : int = execute(a_ , a_ , shots=1_00_00) return job.result().get_counts(a_) if __name__ == "__main__": print( F'Total count for quantum fourier transform state is: \ {quantum_fourier_transform(3)}' )
188
"""simple docstring""" import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __lowercase ( _UpperCamelCase , unittest.TestCase ): '''simple docstring''' __lowerCAmelCase = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' def _lowerCamelCase ( self , _UpperCAmelCase=0 ): __a : Tuple = floats_tensor((1, 3, 128, 128) , rng=random.Random(_UpperCAmelCase ) ) __a : Any = np.random.RandomState(_UpperCAmelCase ) __a : Any = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 3, '''strength''': 0.7_5, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def _lowerCamelCase ( self ): __a : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) __a : Dict = self.get_dummy_inputs() __a : Any = pipe(**_UpperCAmelCase ).images __a : Optional[int] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) __a : List[Any] = np.array([0.6_9_6_4_3, 0.5_8_4_8_4, 0.5_0_3_1_4, 0.5_8_7_6_0, 0.5_5_3_6_8, 0.5_9_6_4_3, 0.5_1_5_2_9, 0.4_1_2_1_7, 0.4_9_0_8_7] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def _lowerCamelCase ( self ): __a : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __a : Tuple = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) __a : Optional[int] = self.get_dummy_inputs() __a : Optional[Any] = pipe(**_UpperCAmelCase ).images __a : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __a : Optional[int] = np.array([0.6_1_7_3_7, 0.5_4_6_4_2, 0.5_3_1_8_3, 0.5_4_4_6_5, 0.5_2_7_4_2, 0.6_0_5_2_5, 0.4_9_9_6_9, 0.4_0_6_5_5, 0.4_8_1_5_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _lowerCamelCase ( self ): __a : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __a : Any = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) # warmup pass to apply optimizations __a : Any = pipe(**self.get_dummy_inputs() ) __a : List[str] = self.get_dummy_inputs() __a : Tuple = pipe(**_UpperCAmelCase ).images __a : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __a : int = np.array([0.5_2_7_6_1, 0.5_9_9_7_7, 0.4_9_0_3_3, 0.4_9_6_1_9, 0.5_4_2_8_2, 0.5_0_3_1_1, 0.4_7_6_0_0, 0.4_0_9_1_8, 0.4_5_2_0_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _lowerCamelCase ( self ): __a : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __a : Any = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) __a : List[Any] = self.get_dummy_inputs() __a : Any = pipe(**_UpperCAmelCase ).images __a : List[Any] = 
image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __a : Optional[Any] = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _lowerCamelCase ( self ): __a : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __a : Any = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) __a : Union[str, Any] = self.get_dummy_inputs() __a : str = pipe(**_UpperCAmelCase ).images __a : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __a : Optional[int] = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _lowerCamelCase ( self ): __a : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __a : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) __a : Optional[int] = self.get_dummy_inputs() __a : Optional[Any] = pipe(**_UpperCAmelCase ).images __a : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __a : Optional[Any] = np.array([0.6_5_3_3_1, 0.5_8_2_7_7, 0.4_8_2_0_4, 0.5_6_0_5_9, 0.5_3_6_6_5, 0.5_6_2_3_5, 0.5_0_9_6_9, 0.4_0_0_0_9, 0.4_6_5_5_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class __lowercase ( unittest.TestCase ): '''simple docstring''' @property def _lowerCamelCase ( self ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _lowerCamelCase ( self ): __a : Optional[Any] = ort.SessionOptions() __a : Any = False return options def _lowerCamelCase ( self ): __a : str = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) __a : Tuple = init_image.resize((768, 512) ) # using the PNDM scheduler by default __a : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) __a : Tuple = '''A fantasy landscape, trending on artstation''' __a : Tuple = np.random.RandomState(0 ) __a : int = pipe( prompt=_UpperCAmelCase , image=_UpperCAmelCase , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=10 , generator=_UpperCAmelCase , output_type='''np''' , ) __a : List[Any] = output.images __a : int = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) __a : Any = np.array([0.4_9_0_9, 0.5_0_5_9, 0.5_3_7_2, 0.4_6_2_3, 0.4_8_7_6, 0.5_0_4_9, 0.4_8_2_0, 0.4_9_5_6, 0.5_0_1_9] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def _lowerCamelCase ( self ): __a : List[str] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) __a : Tuple = init_image.resize((768, 512) 
) __a : str = LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' ) __a : Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) __a : List[str] = '''A fantasy landscape, trending on artstation''' __a : str = np.random.RandomState(0 ) __a : str = pipe( prompt=_UpperCAmelCase , image=_UpperCAmelCase , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=20 , generator=_UpperCAmelCase , output_type='''np''' , ) __a : Dict = output.images __a : List[Any] = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) __a : Dict = np.array([0.8_0_4_3, 0.9_2_6, 0.9_5_8_1, 0.8_1_1_9, 0.8_9_5_4, 0.9_1_3, 0.7_2_0_9, 0.7_4_6_3, 0.7_4_3_1] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
188
1
import copy from typing import Dict, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING from ..detr import DetrConfig from ..swin import SwinConfig __A = { "facebook/maskformer-swin-base-ade": ( "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json" ) # See all MaskFormer models at https://huggingface.co/models?filter=maskformer } __A = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = "maskformer" lowercase_ = {"hidden_size": "mask_feature_size"} lowercase_ = ["resnet", "swin"] lowercase_ = ["detr"] def __init__(self : Optional[int] , UpperCAmelCase_ : int = 256 , UpperCAmelCase_ : int = 256 , UpperCAmelCase_ : float = 0.1 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional[Dict] = None , UpperCAmelCase_ : Optional[Dict] = None , UpperCAmelCase_ : float = 0.02 , UpperCAmelCase_ : float = 1.0 , UpperCAmelCase_ : float = 1.0 , UpperCAmelCase_ : float = 1.0 , UpperCAmelCase_ : float = 20.0 , UpperCAmelCase_ : Optional[bool] = None , **UpperCAmelCase_ : Optional[Any] , ) ->str: '''simple docstring''' if backbone_config is None: # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k lowerCamelCase__: Any =SwinConfig( image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=["stage1", "stage2", "stage3", "stage4"] , ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_): lowerCamelCase__: List[str] =backbone_config.pop("model_type") lowerCamelCase__: int =CONFIG_MAPPING[backbone_model_type] lowerCamelCase__: List[Any] =config_class.from_dict(UpperCAmelCase_) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( F"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. 
""" F"""Supported model types: {",".join(self.backbones_supported)}""") if decoder_config is None: # fall back to https://huggingface.co/facebook/detr-resnet-50 lowerCamelCase__: Tuple =DetrConfig() else: # verify that the decoder is supported lowerCamelCase__: Any =( decoder_config.pop("model_type") if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else decoder_config.model_type ) if decoder_type not in self.decoders_supported: raise ValueError( F"""Transformer Decoder {decoder_type} not supported, please use one of""" F""" {",".join(self.decoders_supported)}""") if isinstance(UpperCAmelCase_ , UpperCAmelCase_): lowerCamelCase__: Union[str, Any] =CONFIG_MAPPING[decoder_type] lowerCamelCase__: List[Any] =config_class.from_dict(UpperCAmelCase_) lowerCamelCase__: List[str] =backbone_config lowerCamelCase__: Dict =decoder_config # main feature dimension for the model lowerCamelCase__: Any =fpn_feature_size lowerCamelCase__: Optional[int] =mask_feature_size # initializer lowerCamelCase__: Union[str, Any] =init_std lowerCamelCase__: str =init_xavier_std # Hungarian matcher && loss lowerCamelCase__: Tuple =cross_entropy_weight lowerCamelCase__: Union[str, Any] =dice_weight lowerCamelCase__: List[str] =mask_weight lowerCamelCase__: List[str] =use_auxiliary_loss lowerCamelCase__: List[str] =no_object_weight lowerCamelCase__: Dict =output_auxiliary_logits lowerCamelCase__: Any =self.decoder_config.encoder_attention_heads lowerCamelCase__: List[Any] =self.decoder_config.num_hidden_layers super().__init__(**UpperCAmelCase_) @classmethod def SCREAMING_SNAKE_CASE_ (cls : List[str] , UpperCAmelCase_ : PretrainedConfig , UpperCAmelCase_ : PretrainedConfig , **UpperCAmelCase_ : List[str]) ->int: '''simple docstring''' return cls( backbone_config=UpperCAmelCase_ , decoder_config=UpperCAmelCase_ , **UpperCAmelCase_ , ) def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Dict[str, any]: '''simple docstring''' lowerCamelCase__: Union[str, Any] =copy.deepcopy(self.__dict__) lowerCamelCase__: Any =self.backbone_config.to_dict() lowerCamelCase__: Any =self.decoder_config.to_dict() lowerCamelCase__: List[str] =self.__class__.model_type return output
10
import os


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)

    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
10
1
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
59
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
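# Verification sketch: a shuffle must preserve the multiset of elements.
from collections import Counter

original = [0, 1, 2, 3, 4, 5, 6, 7]
assert Counter(fisher_yates_shuffle(list(original))) == Counter(original)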
59
1
"""simple docstring""" import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class __A : def __init__( self , a__ , a__=13 , a__=7 , a__=True , a__=True , a__=False , a__=True , a__=99 , a__=32 , a__=5 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=16 , a__=2 , a__=0.0_2 , a__=3 , a__=4 , a__=None , ): _lowerCAmelCase : Tuple = parent _lowerCAmelCase : str = batch_size _lowerCAmelCase : List[str] = seq_length _lowerCAmelCase : Union[str, Any] = is_training _lowerCAmelCase : Dict = use_input_mask _lowerCAmelCase : List[str] = use_token_type_ids _lowerCAmelCase : Tuple = use_labels _lowerCAmelCase : List[Any] = vocab_size _lowerCAmelCase : int = hidden_size _lowerCAmelCase : int = num_hidden_layers _lowerCAmelCase : str = num_attention_heads _lowerCAmelCase : str = intermediate_size _lowerCAmelCase : Optional[int] = hidden_act _lowerCAmelCase : Tuple = hidden_dropout_prob _lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob _lowerCAmelCase : Optional[int] = max_position_embeddings _lowerCAmelCase : List[str] = type_vocab_size _lowerCAmelCase : Optional[int] = type_sequence_label_size _lowerCAmelCase : List[Any] = initializer_range _lowerCAmelCase : List[Any] = num_labels _lowerCAmelCase : Union[str, Any] = num_choices _lowerCAmelCase : Dict = scope def __A ( self ): _lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowerCAmelCase : List[Any] = None if self.use_input_mask: _lowerCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) _lowerCAmelCase : Any = None if self.use_token_type_ids: _lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _lowerCAmelCase : Dict = None _lowerCAmelCase : Optional[int] = None _lowerCAmelCase : Dict = None if self.use_labels: _lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) _lowerCAmelCase : Optional[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __A ( self ): return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a__ , initializer_range=self.initializer_range , ) def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ): _lowerCAmelCase : Dict = LlamaModel(config=a__ ) model.to(a__ ) model.eval() _lowerCAmelCase : List[str] = model(a__ , attention_mask=a__ ) _lowerCAmelCase : str = 
model(a__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ): _lowerCAmelCase : int = True _lowerCAmelCase : Optional[Any] = LlamaModel(a__ ) model.to(a__ ) model.eval() _lowerCAmelCase : Union[str, Any] = model( a__ , attention_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , ) _lowerCAmelCase : Union[str, Any] = model( a__ , attention_mask=a__ , encoder_hidden_states=a__ , ) _lowerCAmelCase : Dict = model(a__ , attention_mask=a__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ): _lowerCAmelCase : int = LlamaForCausalLM(config=a__ ) model.to(a__ ) model.eval() _lowerCAmelCase : List[str] = model(a__ , attention_mask=a__ , labels=a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ): _lowerCAmelCase : Optional[Any] = True _lowerCAmelCase : Union[str, Any] = True _lowerCAmelCase : Tuple = LlamaForCausalLM(config=a__ ) model.to(a__ ) model.eval() # first forward pass _lowerCAmelCase : Any = model( a__ , attention_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , use_cache=a__ , ) _lowerCAmelCase : Optional[int] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids _lowerCAmelCase : int = ids_tensor((self.batch_size, 3) , config.vocab_size ) _lowerCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and _lowerCAmelCase : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 ) _lowerCAmelCase : Dict = torch.cat([input_mask, next_mask] , dim=-1 ) _lowerCAmelCase : Tuple = model( a__ , attention_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , output_hidden_states=a__ , )["hidden_states"][0] _lowerCAmelCase : Dict = model( a__ , attention_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , past_key_values=a__ , output_hidden_states=a__ , )["hidden_states"][0] # select random slice _lowerCAmelCase : Dict = ids_tensor((1,) , output_from_past.shape[-1] ).item() _lowerCAmelCase : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() _lowerCAmelCase : int = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(a__ , a__ , atol=1e-3 ) ) def __A ( self ): _lowerCAmelCase : Any = self.prepare_config_and_inputs() ( _lowerCAmelCase ) : Dict = config_and_inputs _lowerCAmelCase : Any = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class __A ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): _UpperCamelCase : Tuple = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () _UpperCamelCase : Optional[Any] = (LlamaForCausalLM,) if is_torch_available() else () _UpperCamelCase : str = ( { "feature-extraction": LlamaModel, "text-classification": LlamaForSequenceClassification, "text-generation": LlamaForCausalLM, "zero-shot": LlamaForSequenceClassification, } if is_torch_available() else {} ) _UpperCamelCase : Union[str, Any] = False _UpperCamelCase : Optional[int] = False 
def __A ( self ): _lowerCAmelCase : List[Any] = LlamaModelTester(self ) _lowerCAmelCase : Tuple = ConfigTester(self , config_class=a__ , hidden_size=37 ) def __A ( self ): self.config_tester.run_common_tests() def __A ( self ): _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a__ ) def __A ( self ): _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _lowerCAmelCase : Tuple = type self.model_tester.create_and_check_model(*a__ ) def __A ( self ): _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase : List[Any] = 3 _lowerCAmelCase : List[Any] = input_dict["input_ids"] _lowerCAmelCase : str = input_ids.ne(1 ).to(a__ ) _lowerCAmelCase : Dict = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) _lowerCAmelCase : List[str] = LlamaForSequenceClassification(a__ ) model.to(a__ ) model.eval() _lowerCAmelCase : List[str] = model(a__ , attention_mask=a__ , labels=a__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __A ( self ): _lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase : str = 3 _lowerCAmelCase : List[Any] = "single_label_classification" _lowerCAmelCase : Union[str, Any] = input_dict["input_ids"] _lowerCAmelCase : List[Any] = input_ids.ne(1 ).to(a__ ) _lowerCAmelCase : List[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) _lowerCAmelCase : Optional[int] = LlamaForSequenceClassification(a__ ) model.to(a__ ) model.eval() _lowerCAmelCase : List[str] = model(a__ , attention_mask=a__ , labels=a__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __A ( self ): _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase : List[str] = 3 _lowerCAmelCase : Union[str, Any] = "multi_label_classification" _lowerCAmelCase : Any = input_dict["input_ids"] _lowerCAmelCase : str = input_ids.ne(1 ).to(a__ ) _lowerCAmelCase : str = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) _lowerCAmelCase : Tuple = LlamaForSequenceClassification(a__ ) model.to(a__ ) model.eval() _lowerCAmelCase : List[Any] = model(a__ , attention_mask=a__ , labels=a__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" ) def __A ( self ): pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def __A ( self , a__ ): _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase : Union[str, Any] = ids_tensor([1, 10] , config.vocab_size ) _lowerCAmelCase : int = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights _lowerCAmelCase : Optional[int] = LlamaModel(a__ ) original_model.to(a__ ) original_model.eval() _lowerCAmelCase : str = original_model(a__ ).last_hidden_state _lowerCAmelCase : List[Any] = original_model(a__ ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights _lowerCAmelCase : Dict = {"type": scaling_type, "factor": 1_0.0} 
_lowerCAmelCase : Any = LlamaModel(a__ ) scaled_model.to(a__ ) scaled_model.eval() _lowerCAmelCase : Tuple = scaled_model(a__ ).last_hidden_state _lowerCAmelCase : Optional[Any] = scaled_model(a__ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(a__ , a__ , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(a__ , a__ , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(a__ , a__ , atol=1e-5 ) ) @require_torch class __A ( unittest.TestCase ): @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def __A ( self ): _lowerCAmelCase : Any = [1, 306, 4658, 278, 6593, 310, 2834, 338] _lowerCAmelCase : Dict = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" , device_map="""auto""" ) _lowerCAmelCase : List[str] = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 _lowerCAmelCase : int = torch.tensor([[-6.6_5_5_0, -4.1_2_2_7, -4.9_8_5_9, -3.2_4_0_6, 0.8_2_6_2, -3.0_0_3_3, 1.2_9_6_4, -3.3_6_9_9]] ) torch.testing.assert_close(out.mean(-1 ) , a__ , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off _lowerCAmelCase : Union[str, Any] = torch.tensor([-1_2.8_2_8_1, -7.4_4_5_3, -0.4_6_3_9, -8.0_6_2_5, -7.2_5_0_0, -8.0_0_0_0, -6.4_8_8_3, -7.7_6_9_5, -7.8_4_3_8, -7.0_3_1_2, -6.2_1_8_8, -7.1_3_2_8, -1.8_4_9_6, 1.9_9_6_1, -8.6_2_5_0, -6.7_2_2_7, -1_2.8_2_8_1, -6.9_4_9_2, -7.0_7_4_2, -7.7_8_5_2, -7.5_8_2_0, -7.9_0_6_2, -6.9_3_7_5, -7.9_8_0_5, -8.3_4_3_8, -8.1_5_6_2, -8.0_4_6_9, -7.6_2_5_0, -7.7_4_2_2, -7.3_3_9_8,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , a__ , atol=1e-5 , rtol=1e-5 ) @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def __A ( self ): _lowerCAmelCase : str = [1, 306, 4658, 278, 6593, 310, 2834, 338] _lowerCAmelCase : List[Any] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" , device_map="""auto""" ) _lowerCAmelCase : Tuple = model(torch.tensor(a__ ) ) # Expected mean on dim = -1 _lowerCAmelCase : List[Any] = torch.tensor([[-2.0_6_2_2, -1.2_7_9_4, -1.1_6_3_8, -0.9_7_8_8, -1.4_6_0_3, -1.0_2_3_8, -1.7_8_9_3, -1.4_4_1_1]] ) torch.testing.assert_close(out.mean(-1 ) , a__ , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off _lowerCAmelCase : Dict = torch.tensor([-8.1_4_0_6, -8.0_5_4_7, 2.7_4_6_1, -1.2_3_4_4, -0.1_4_4_8, -1.8_2_6_2, -1.0_0_2_0, -1.8_1_5_4, -1.6_8_9_5, -1.8_5_1_6, -2.3_5_7_4, -0.9_2_7_7, 3.7_5_9_8, 6.5_7_4_2, -1.2_9_9_8, -0.1_1_7_7, -8.1_4_0_6, -2.9_6_8_8, -2.9_1_9_9, -3.1_6_9_9, -3.5_2_5_4, -2.3_5_5_5, -2.7_9_8_8, -3.4_1_4_1, -2.8_2_6_2, -4.5_1_9_5, -3.3_3_7_9, -3.3_1_6_4, -2.7_8_3_2, -3.0_2_7_3] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , a__ , atol=1e-5 , rtol=1e-5 ) @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def __A ( self ): _lowerCAmelCase : str = [1, 306, 4658, 278, 6593, 310, 2834, 338] _lowerCAmelCase : List[Any] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" , device_map="""auto""" ) _lowerCAmelCase : Tuple = model(torch.tensor(a__ ) ) # Expected mean on dim = -1 _lowerCAmelCase : List[str] = torch.tensor([[-0.8_5_6_2, -1.8_5_2_0, -0.7_5_5_1, -0.4_1_6_2, -1.5_1_6_1, -1.2_0_3_8, -2.4_8_2_3, -2.3_2_5_4]] ) 
torch.testing.assert_close(out.mean(-1 ) , a__ , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off _lowerCAmelCase : List[str] = torch.tensor([-2.2_2_2_7, 4.8_8_2_8, 0.9_0_2_3, -0.4_5_7_8, -0.7_8_7_1, -0.1_0_3_3, -0.6_2_2_1, -0.5_7_8_6, -0.7_8_0_3, -1.0_6_7_4, -1.2_9_2_0, -0.1_5_7_0, 0.8_0_0_8, 2.0_7_2_3, -0.9_4_9_7, 0.2_7_7_1, -2.2_2_2_7, -0.7_6_1_2, -1.4_3_4_6, -1.2_0_6_1, -1.6_4_2_6, -0.3_0_0_0, -0.7_1_3_9, -1.1_9_3_4, -1.8_6_9_1, -1.6_9_7_3, -1.5_9_4_7, -1.2_7_0_5, -0.3_5_2_3, -0.5_5_1_3] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) , a__ , atol=1e-2 , rtol=1e-2 ) @unittest.skip( """Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test""" ) @slow def __A ( self ): _lowerCAmelCase : str = [1, 306, 4658, 278, 6593, 310, 2834, 338] _lowerCAmelCase : List[str] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" , device_map="""auto""" ) _lowerCAmelCase : Union[str, Any] = model(torch.tensor(a__ ) ) _lowerCAmelCase : Dict = torch.tensor( [[-4.2_3_2_7, -3.3_3_6_0, -4.6_6_6_5, -4.7_6_3_1, -1.8_1_8_0, -3.4_1_7_0, -1.4_2_1_1, -3.1_8_1_0]] , dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) , a__ , atol=1e-2 , rtol=1e-2 ) # fmt: off _lowerCAmelCase : Any = torch.tensor([-9.4_9_2_2, -3.9_5_5_1, 1.7_9_9_8, -5.6_7_5_8, -5.1_0_5_5, -5.8_9_8_4, -4.8_3_2_0, -6.8_0_8_6, -6.5_3_9_1, -5.6_1_7_2, -5.5_8_2_0, -5.5_3_5_2, 1.7_8_8_1, 3.6_2_8_9, -6.5_1_1_7, -3.4_7_8_5, -9.5_0_0_0, -6.0_3_5_2, -6.8_1_2_5, -6.0_1_9_5, -6.6_8_3_6, -5.4_7_2_7, -6.2_8_1_2, -6.0_3_9_1, -7.3_3_9_8, -7.4_2_9_7, -7.4_8_4_4, -6.5_8_2_0, -5.8_7_8_9, -5.5_3_1_2] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , a__ , atol=1e-5 , rtol=1e-5 ) @unittest.skip("""Model is curently gated""" ) @slow def __A ( self ): _lowerCAmelCase : Tuple = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi" _lowerCAmelCase : Union[str, Any] = "Simply put, the theory of relativity states that " _lowerCAmelCase : Any = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ) _lowerCAmelCase : List[str] = tokenizer.encode(a__ , return_tensors="""pt""" ) _lowerCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained( """meta-llama/Llama-2-13b-chat-hf""" , device_map="""sequential""" , use_safetensors=a__ ) # greedy generation outputs _lowerCAmelCase : Tuple = model.generate(a__ , max_new_tokens=64 , top_p=a__ , temperature=1 , do_sample=a__ ) _lowerCAmelCase : Optional[Any] = tokenizer.decode(generated_ids[0] , skip_special_tokens=a__ ) self.assertEqual(a__ , a__ )
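For context on the parameterized scaling test above: `rope_scaling` is a regular `LlamaConfig` argument, so the same behaviour can be probed outside the test harness. A minimal sketch, with deliberately tiny, illustrative model dimensions:

from transformers import LlamaConfig, LlamaModel

# "linear" stretches positions by `factor`; "dynamic" only changes the RoPE
# embeddings once an input exceeds `max_position_embeddings`, which is exactly
# what the allclose assertions above check.
config = LlamaConfig(
    vocab_size=100,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=4,
    max_position_embeddings=64,
    rope_scaling={"type": "linear", "factor": 2.0},
)
model = LlamaModel(config)  # random weights are enough to compare hidden states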
44
import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = ['''model.decoder.embed_positions.weights'''] def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" if "emb" in name: lowercase__ : int = name.replace("emb" , "model.decoder.embed_tokens" ) if "transformer" in name: lowercase__ : Any = name.replace("transformer" , "model.decoder" ) if "cross_attention" in name: lowercase__ : int = name.replace("cross_attention" , "encoder_attn" ) if "linear1" in name: lowercase__ : int = name.replace("linear1" , "fc1" ) if "linear2" in name: lowercase__ : int = name.replace("linear2" , "fc2" ) if "norm1" in name: lowercase__ : Union[str, Any] = name.replace("norm1" , "self_attn_layer_norm" ) if "norm_cross" in name: lowercase__ : Union[str, Any] = name.replace("norm_cross" , "encoder_attn_layer_norm" ) if "norm2" in name: lowercase__ : Dict = name.replace("norm2" , "final_layer_norm" ) if "out_norm" in name: lowercase__ : Dict = name.replace("out_norm" , "model.decoder.layer_norm" ) if "linears" in name: lowercase__ : Union[str, Any] = name.replace("linears" , "lm_heads" ) if "condition_provider.conditioners.description.output_proj" in name: lowercase__ : Union[str, Any] = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" ) return name def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowercase__ : Optional[Any] = list(state_dict.keys() ) lowercase__ : Dict = {} for key in keys: lowercase__ : Tuple = state_dict.pop(lowerCamelCase__ ) lowercase__ : Union[str, Any] = rename_keys(lowerCamelCase__ ) if "in_proj_weight" in key: # split fused qkv proj lowercase__ : Optional[int] = val[:hidden_size, :] lowercase__ : Optional[int] = val[hidden_size : 2 * hidden_size, :] lowercase__ : List[Any] = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: lowercase__ : Union[str, Any] = val else: lowercase__ : List[Any] = val return state_dict, enc_dec_proj_state_dict def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" if checkpoint == "small": # default config values lowercase__ : Optional[Any] = 1_024 lowercase__ : int = 24 lowercase__ : Optional[Any] = 16 elif checkpoint == "medium": lowercase__ : str = 1_536 lowercase__ : Union[str, Any] = 48 lowercase__ : Optional[int] = 24 elif checkpoint == "large": lowercase__ : Tuple = 2_048 lowercase__ : Union[str, Any] = 48 lowercase__ : Dict = 32 else: raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" ) lowercase__ : int = MusicgenDecoderConfig( hidden_size=lowerCamelCase__ , ffn_dim=hidden_size * 4 , num_hidden_layers=lowerCamelCase__ , num_attention_heads=lowerCamelCase__ , ) return config @torch.no_grad() def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__="cpu" ): """simple docstring""" lowercase__ : List[Any] = MusicGen.get_pretrained(lowerCamelCase__ , device=lowerCamelCase__ ) lowercase__ : str = decoder_config_from_checkpoint(lowerCamelCase__ ) lowercase__ : Optional[Any] = 
fairseq_model.lm.state_dict() lowercase__ , lowercase__ : Tuple = rename_state_dict( lowerCamelCase__ , hidden_size=decoder_config.hidden_size ) lowercase__ : str = TaEncoderModel.from_pretrained("t5-base" ) lowercase__ : Tuple = EncodecModel.from_pretrained("facebook/encodec_32khz" ) lowercase__ : List[str] = MusicgenForCausalLM(lowerCamelCase__ ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection lowercase__ , lowercase__ : List[str] = decoder.load_state_dict(lowerCamelCase__ , strict=lowerCamelCase__ ) for key in missing_keys.copy(): if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(lowerCamelCase__ ) if len(lowerCamelCase__ ) > 0: raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" ) if len(lowerCamelCase__ ) > 0: raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" ) # init the composite model lowercase__ : Any = MusicgenForConditionalGeneration(text_encoder=lowerCamelCase__ , audio_encoder=lowerCamelCase__ , decoder=lowerCamelCase__ ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(lowerCamelCase__ ) # check we can do a forward pass lowercase__ : List[str] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) lowercase__ : Any = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): lowercase__ : List[str] = model(input_ids=lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ ).logits if logits.shape != (8, 1, 2_048): raise ValueError("Incorrect shape for logits" ) # now construct the processor lowercase__ : List[Any] = AutoTokenizer.from_pretrained("t5-base" ) lowercase__ : Dict = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" ) lowercase__ : Optional[Any] = MusicgenProcessor(feature_extractor=lowerCamelCase__ , tokenizer=lowerCamelCase__ ) # set the appropriate bos/pad token ids lowercase__ : List[Any] = 2_048 lowercase__ : List[Any] = 2_048 # set other default generation config params lowercase__ : str = int(30 * audio_encoder.config.frame_rate ) lowercase__ : List[Any] = True lowercase__ : Dict = 3.0 if pytorch_dump_folder is not None: Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ ) logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" ) model.save_pretrained(lowerCamelCase__ ) processor.save_pretrained(lowerCamelCase__ ) if repo_id: logger.info(F"""Pushing model {checkpoint} to {repo_id}""" ) model.push_to_hub(lowerCamelCase__ ) processor.push_to_hub(lowerCamelCase__ ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint''', default='''small''', type=str, help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''', ) parser.add_argument( '''--pytorch_dump_folder''', required=True, default=None, type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) parser.add_argument( '''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.''' ) lowerCAmelCase__ = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
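For reference, a typical invocation of the converter above; the script filename and output path are placeholders, while the flags come from the argparse block:

python convert_musicgen_transformers.py \
    --checkpoint small \
    --pytorch_dump_folder ./musicgen-small \
    --device cpu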
130
0
import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class _a ( _lowerCAmelCase ): A = (DPMSolverSDEScheduler,) A = 10 def __snake_case (self, **SCREAMING_SNAKE_CASE_ ) -> str: UpperCAmelCase_: Union[str, Any] = { """num_train_timesteps""": 1100, """beta_start""": 0.0_0_0_1, """beta_end""": 0.0_2, """beta_schedule""": """linear""", """noise_sampler_seed""": 0, } config.update(**SCREAMING_SNAKE_CASE_ ) return config def __snake_case (self ) -> str: for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_ ) def __snake_case (self ) -> List[Any]: for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1], [0.0_0_0_2, 0.0_0_2, 0.0_2] ): self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE_, beta_end=SCREAMING_SNAKE_CASE_ ) def __snake_case (self ) -> Optional[int]: for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE_ ) def __snake_case (self ) -> Optional[Any]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE_ ) def __snake_case (self ) -> Any: UpperCAmelCase_: str = self.scheduler_classes[0] UpperCAmelCase_: Any = self.get_scheduler_config() UpperCAmelCase_: List[str] = scheduler_class(**SCREAMING_SNAKE_CASE_ ) scheduler.set_timesteps(self.num_inference_steps ) UpperCAmelCase_: int = self.dummy_model() UpperCAmelCase_: Any = self.dummy_sample_deter * scheduler.init_noise_sigma UpperCAmelCase_: Any = sample.to(SCREAMING_SNAKE_CASE_ ) for i, t in enumerate(scheduler.timesteps ): UpperCAmelCase_: str = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: Any = model(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: Tuple = scheduler.step(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: List[str] = output.prev_sample UpperCAmelCase_: Optional[int] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) ) UpperCAmelCase_: Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 1_6_7.4_7_8_2_1_0_4_4_9_2_1_8_7_5 ) < 1E-2 assert abs(result_mean.item() - 0.2_1_7_8_7_0_5_9_6_4_5_6_5_2_7_7 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 1_7_1.5_9_3_5_2_1_1_1_8_1_6_4_0_6 ) < 1E-2 assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_6_8_9_2_2_9_9_6_5_2 ) < 1E-3 else: assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2 assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3 def __snake_case (self ) -> str: UpperCAmelCase_: Tuple = self.scheduler_classes[0] UpperCAmelCase_: Optional[int] = self.get_scheduler_config(prediction_type="""v_prediction""" ) UpperCAmelCase_: Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE_ ) scheduler.set_timesteps(self.num_inference_steps ) UpperCAmelCase_: int = self.dummy_model() UpperCAmelCase_: Dict = self.dummy_sample_deter * scheduler.init_noise_sigma UpperCAmelCase_: List[Any] = sample.to(SCREAMING_SNAKE_CASE_ ) for i, t in enumerate(scheduler.timesteps ): UpperCAmelCase_: Optional[int] = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: Any = model(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: int = scheduler.step(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, 
SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: str = output.prev_sample UpperCAmelCase_: Optional[Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) ) UpperCAmelCase_: List[str] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 1_2_4.7_7_1_4_9_2_0_0_4_3_9_4_5_3 ) < 1E-2 assert abs(result_mean.item() - 0.1_6_2_2_6_2_8_9_0_1_4_8_1_6_2_8_4 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 1_2_8.1_6_6_3_3_6_0_5_9_5_7_0_3 ) < 1E-2 assert abs(result_mean.item() - 0.1_6_6_8_8_3_2_6_0_0_1_1_6_7_2_9_7 ) < 1E-3 else: assert abs(result_sum.item() - 1_1_9.8_4_8_7_5_4_8_8_2_8_1_2_5 ) < 1E-2 assert abs(result_mean.item() - 0.1_5_6_0_5_3_0_6_6_2_5_3_6_6_2_1 ) < 1E-3 def __snake_case (self ) -> List[Any]: UpperCAmelCase_: Tuple = self.scheduler_classes[0] UpperCAmelCase_: Any = self.get_scheduler_config() UpperCAmelCase_: Tuple = scheduler_class(**SCREAMING_SNAKE_CASE_ ) scheduler.set_timesteps(self.num_inference_steps, device=SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: Tuple = self.dummy_model() UpperCAmelCase_: str = self.dummy_sample_deter.to(SCREAMING_SNAKE_CASE_ ) * scheduler.init_noise_sigma for t in scheduler.timesteps: UpperCAmelCase_: Dict = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: Any = model(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: str = scheduler.step(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: List[str] = output.prev_sample UpperCAmelCase_: Any = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) ) UpperCAmelCase_: str = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 1_6_7.4_6_9_5_7_3_9_7_4_6_0_9_3_8 ) < 1E-2 assert abs(result_mean.item() - 0.2_1_8_0_5_9_3_4_6_0_7_9_8_2_6_3_5 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 1_7_1.5_9_3_5_3_6_3_7_6_9_5_3_1_2 ) < 1E-2 assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_8_3_8_2_4_1_5_7_7_1 ) < 1E-3 else: assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2 assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3 def __snake_case (self ) -> Any: UpperCAmelCase_: List[str] = self.scheduler_classes[0] UpperCAmelCase_: Tuple = self.get_scheduler_config() UpperCAmelCase_: Tuple = scheduler_class(**SCREAMING_SNAKE_CASE_, use_karras_sigmas=SCREAMING_SNAKE_CASE_ ) scheduler.set_timesteps(self.num_inference_steps, device=SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: List[Any] = self.dummy_model() UpperCAmelCase_: List[Any] = self.dummy_sample_deter.to(SCREAMING_SNAKE_CASE_ ) * scheduler.init_noise_sigma UpperCAmelCase_: Dict = sample.to(SCREAMING_SNAKE_CASE_ ) for t in scheduler.timesteps: UpperCAmelCase_: int = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: Tuple = model(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: Optional[int] = scheduler.step(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: Optional[Any] = output.prev_sample UpperCAmelCase_: Tuple = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) ) UpperCAmelCase_: Dict = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 1_7_6.6_6_9_7_4_1_3_5_7_4_2_1_8_8 ) < 1E-2 assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 1_7_7.6_3_6_5_3_5_6_4_4_5_3_1_2_5 ) < 1E-2 assert 
abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2 else: assert abs(result_sum.item() - 1_7_0.3_1_3_5_2_2_3_3_8_8_6_7_2 ) < 1E-2 assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
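Outside the test suite, this scheduler is swapped into a pipeline through the standard diffusers pattern. A sketch, assuming the `torchsde` extra is installed (the test class itself is gated on `@require_torchsde`); the checkpoint name is only an example:

from diffusers import DiffusionPipeline, DPMSolverSDEScheduler

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
# reuse the pipeline's scheduler config, but sample with the SDE solver
pipe.scheduler = DPMSolverSDEScheduler.from_config(pipe.scheduler.config)
image = pipe("an astronaut riding a horse", num_inference_steps=25).images[0]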
82
from __future__ import annotations import inspect import unittest from math import floor import numpy as np from transformers import CvtConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFCvtForImageClassification, TFCvtModel from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _a ( _lowerCAmelCase ): def __snake_case (self ) -> Optional[int]: UpperCAmelCase_: Union[str, Any] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, """embed_dim""" ) ) self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, """num_heads""" ) ) class _a : def __init__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=13, SCREAMING_SNAKE_CASE_=64, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=[16, 48, 96], SCREAMING_SNAKE_CASE_=[1, 3, 6], SCREAMING_SNAKE_CASE_=[1, 2, 10], SCREAMING_SNAKE_CASE_=[7, 3, 3], SCREAMING_SNAKE_CASE_=[4, 2, 2], SCREAMING_SNAKE_CASE_=[2, 1, 1], SCREAMING_SNAKE_CASE_=[2, 2, 2], SCREAMING_SNAKE_CASE_=[False, False, True], SCREAMING_SNAKE_CASE_=[0.0, 0.0, 0.0], SCREAMING_SNAKE_CASE_=0.0_2, SCREAMING_SNAKE_CASE_=1E-12, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=2, ) -> List[Any]: UpperCAmelCase_: Union[str, Any] = parent UpperCAmelCase_: Any = batch_size UpperCAmelCase_: Optional[int] = image_size UpperCAmelCase_: Tuple = patch_sizes UpperCAmelCase_: int = patch_stride UpperCAmelCase_: int = patch_padding UpperCAmelCase_: List[str] = is_training UpperCAmelCase_: List[Any] = use_labels UpperCAmelCase_: int = num_labels UpperCAmelCase_: Dict = num_channels UpperCAmelCase_: Any = embed_dim UpperCAmelCase_: Optional[Any] = num_heads UpperCAmelCase_: Dict = stride_kv UpperCAmelCase_: Dict = depth UpperCAmelCase_: Optional[Any] = cls_token UpperCAmelCase_: List[str] = attention_drop_rate UpperCAmelCase_: List[str] = initializer_range UpperCAmelCase_: Tuple = layer_norm_eps def __snake_case (self ) -> Dict: UpperCAmelCase_: str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_: Optional[Any] = None if self.use_labels: # create a random int32 tensor of given shape UpperCAmelCase_: str = ids_tensor([self.batch_size], self.num_labels ) UpperCAmelCase_: List[str] = self.get_config() return config, pixel_values, labels def __snake_case (self ) -> Tuple: return CvtConfig( image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range, ) def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: UpperCAmelCase_: Optional[int] = TFCvtModel(config=SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: Tuple = model(SCREAMING_SNAKE_CASE_, training=SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: Optional[Any] = 
(self.image_size, self.image_size) UpperCAmelCase_ , UpperCAmelCase_: Any = image_size[0], image_size[1] for i in range(len(self.depth ) ): UpperCAmelCase_: Optional[Any] = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) UpperCAmelCase_: str = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width) ) def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> str: UpperCAmelCase_: List[str] = self.num_labels UpperCAmelCase_: Tuple = TFCvtForImageClassification(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: Any = model(SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_, training=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def __snake_case (self ) -> Dict: UpperCAmelCase_: Any = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: Union[str, Any] = config_and_inputs UpperCAmelCase_: Optional[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class _a ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): A = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else () A = ( {'''feature-extraction''': TFCvtModel, '''image-classification''': TFCvtForImageClassification} if is_tf_available() else {} ) A = False A = False A = False A = False A = False def __snake_case (self ) -> int: UpperCAmelCase_: Tuple = TFCvtModelTester(self ) UpperCAmelCase_: Dict = TFCvtConfigTester(self, config_class=SCREAMING_SNAKE_CASE_, has_text_modality=SCREAMING_SNAKE_CASE_, hidden_size=37 ) def __snake_case (self ) -> List[Any]: self.config_tester.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() @unittest.skip(reason="""Cvt does not output attentions""" ) def __snake_case (self ) -> Optional[int]: pass @unittest.skip(reason="""Cvt does not use inputs_embeds""" ) def __snake_case (self ) -> List[str]: pass @unittest.skip(reason="""Cvt does not support input and output embeddings""" ) def __snake_case (self ) -> Dict: pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0, reason="""TF does not support backprop for grouped convolutions on CPU.""", ) def __snake_case (self ) -> Optional[int]: super().test_dataset_conversion() @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0, reason="""TF does not support backprop for grouped convolutions on CPU.""", ) @slow def __snake_case (self ) -> int: super().test_keras_fit() @unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" ) def __snake_case (self ) -> List[Any]: UpperCAmelCase_: List[str] = tf.keras.mixed_precision.Policy("""mixed_float16""" ) tf.keras.mixed_precision.set_global_policy(SCREAMING_SNAKE_CASE_ ) super().test_keras_fit() tf.keras.mixed_precision.set_global_policy("""float32""" ) def __snake_case (self ) -> Tuple: UpperCAmelCase_ , UpperCAmelCase_: str = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_: List[str] = model_class(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: Optional[int] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_: Any = [*signature.parameters.keys()] UpperCAmelCase_: Optional[int] = ["""pixel_values"""] self.assertListEqual(arg_names[:1], SCREAMING_SNAKE_CASE_ ) def __snake_case (self ) -> Any: def check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ): UpperCAmelCase_: Optional[int] = model_class(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: Optional[int] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) ) UpperCAmelCase_: Optional[Any] = outputs.hidden_states UpperCAmelCase_: Optional[int] = len(self.model_tester.depth ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ), SCREAMING_SNAKE_CASE_ ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ), [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ], ) UpperCAmelCase_ , UpperCAmelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_: int = True check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_: Tuple = True check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) def __snake_case (self ) -> int: UpperCAmelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def __snake_case (self ) -> Optional[int]: UpperCAmelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ ) @slow def __snake_case (self ) -> Optional[int]: for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_: Union[str, Any] = TFCvtModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase_ (): """simple docstring""" UpperCAmelCase_: str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class _a ( unittest.TestCase ): @cached_property def __snake_case (self ) -> Tuple: return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def __snake_case (self ) -> Dict: UpperCAmelCase_: Tuple = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) UpperCAmelCase_: Dict = self.default_image_processor UpperCAmelCase_: Dict = prepare_img() UpperCAmelCase_: Union[str, Any] = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors="""tf""" ) # forward pass UpperCAmelCase_: int = model(**SCREAMING_SNAKE_CASE_ ) # verify the logits UpperCAmelCase_: Optional[int] = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape, SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: Dict = tf.constant([0.9_2_8_5, 0.9_0_1_5, -0.3_1_5_0] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), SCREAMING_SNAKE_CASE_, atol=1E-4 ) )
82
1
from PIL import Image


def change_brightness(img: Image, level: float):
    """Add `level` to every pixel value of a PIL image."""

    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)')
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open('image_data/lena.jpg') as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save('image_data/lena_brightness.png', format='png')
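`Image.point` accepts any per-pixel mapping, so the same pattern covers other adjustments. A sketch of a contrast change in the same style (`change_contrast` is not part of the module above, and the 259-based factor is one common convention, not the only one):

from PIL import Image


def change_contrast(img: Image, level: float):
    """Scale pixel values away from (or towards) the midpoint 128."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> float:
        return 128 + factor * (c - 128)

    return img.point(contrast)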
284
import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler UpperCAmelCase_ = 16 UpperCAmelCase_ = 32 def lowerCamelCase__ ( A__ : Accelerator , A__ : int = 16 , A__ : str = "bert-base-cased" ): '''simple docstring''' __lowerCamelCase = AutoTokenizer.from_pretrained(A__ ) __lowerCamelCase = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(A__ : int ): # max_length=None => use the model max length (it's actually the default) __lowerCamelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=A__ , max_length=A__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __lowerCamelCase = datasets.map( A__ , batched=A__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=A__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __lowerCamelCase = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(A__ : Optional[int] ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(A__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" ) return tokenizer.pad(A__ , padding="""longest""" , return_tensors="""pt""" ) # Instantiate dataloaders. __lowerCamelCase = DataLoader( tokenized_datasets["""train"""] , shuffle=A__ , collate_fn=A__ , batch_size=A__ ) __lowerCamelCase = DataLoader( tokenized_datasets["""validation"""] , shuffle=A__ , collate_fn=A__ , batch_size=A__ ) return train_dataloader, eval_dataloader def lowerCamelCase__ ( A__ : Tuple , A__ : Union[str, Any] , A__ : Tuple , A__ : Optional[Any] ): '''simple docstring''' model.eval() __lowerCamelCase = 0 for step, batch in enumerate(A__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): __lowerCamelCase = model(**A__ ) __lowerCamelCase = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times __lowerCamelCase, __lowerCamelCase = accelerator.gather( (predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(A__ ) - 1: __lowerCamelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen] __lowerCamelCase = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=A__ , references=A__ , ) __lowerCamelCase = metric.compute() return eval_metric["accuracy"] def lowerCamelCase__ ( A__ : Optional[Any] , A__ : Optional[int] ): '''simple docstring''' __lowerCamelCase = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __lowerCamelCase = config["""lr"""] __lowerCamelCase = int(config["""num_epochs"""] ) __lowerCamelCase = int(config["""seed"""] ) __lowerCamelCase = int(config["""batch_size"""] ) __lowerCamelCase = args.model_name_or_path set_seed(A__ ) __lowerCamelCase, __lowerCamelCase = get_dataloaders(A__ , A__ , A__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __lowerCamelCase = AutoModelForSequenceClassification.from_pretrained(A__ , return_dict=A__ ) # Instantiate optimizer __lowerCamelCase = ( AdamW if accelerator.state.deepspeed_plugin is None or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) __lowerCamelCase = optimizer_cls(params=model.parameters() , lr=A__ ) if accelerator.state.deepspeed_plugin is not None: __lowerCamelCase = accelerator.state.deepspeed_plugin.deepspeed_config[ """gradient_accumulation_steps""" ] else: __lowerCamelCase = 1 __lowerCamelCase = (len(A__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): __lowerCamelCase = get_linear_schedule_with_warmup( optimizer=A__ , num_warmup_steps=0 , num_training_steps=A__ , ) else: __lowerCamelCase = DummyScheduler(A__ , total_num_steps=A__ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare( A__ , A__ , A__ , A__ , A__ ) # We need to keep track of how many total steps we have iterated over __lowerCamelCase = 0 # We also need to keep track of the stating epoch so files are named properly __lowerCamelCase = 0 __lowerCamelCase = evaluate.load("""glue""" , """mrpc""" ) __lowerCamelCase = num_epochs if args.partial_train_epoch is not None: __lowerCamelCase = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) __lowerCamelCase = args.resume_from_checkpoint.split("""epoch_""" )[1] __lowerCamelCase = """""" for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break __lowerCamelCase = int(A__ ) + 1 __lowerCamelCase = evaluation_loop(A__ , A__ , A__ , A__ ) accelerator.print("""resumed checkpoint performance:""" , A__ ) accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] ) accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] ) with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , """r""" ) as f: __lowerCamelCase = json.load(A__ ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model __lowerCamelCase = {} for epoch in range(A__ , A__ ): model.train() for step, batch in enumerate(A__ ): __lowerCamelCase = model(**A__ ) __lowerCamelCase = outputs.loss __lowerCamelCase = loss / gradient_accumulation_steps accelerator.backward(A__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 __lowerCamelCase = f'epoch_{epoch}' __lowerCamelCase = os.path.join(args.output_dir , A__ ) accelerator.save_state(A__ ) __lowerCamelCase = evaluation_loop(A__ , A__ , A__ , A__ ) __lowerCamelCase = accuracy __lowerCamelCase = lr_scheduler.get_lr()[0] __lowerCamelCase = optimizer.param_groups[0]["""lr"""] __lowerCamelCase = epoch __lowerCamelCase = overall_step accelerator.print(f'epoch {epoch}:' , A__ ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , """w""" ) as f: json.dump(A__ , A__ ) def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" ) parser.add_argument( """--model_name_or_path""" , type=A__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=A__ , ) parser.add_argument( """--output_dir""" , type=A__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. 
Default is the current working directory.""" , ) parser.add_argument( """--resume_from_checkpoint""" , type=A__ , default=A__ , help="""If the training should continue from a checkpoint folder.""" , ) parser.add_argument( """--partial_train_epoch""" , type=A__ , default=A__ , help="""If passed, the training will stop after this number of epochs.""" , ) parser.add_argument( """--num_epochs""" , type=A__ , default=2 , help="""Number of train epochs.""" , ) __lowerCamelCase = parser.parse_args() __lowerCamelCase = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16} training_function(A__ , A__ ) if __name__ == "__main__": main()
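Assuming the script above is saved as, say, `test_checkpointing.py` (the name is a placeholder), it is meant to be started through `accelerate launch`, optionally with a DeepSpeed config file so the `DummyOptim`/`DummyScheduler` branches are exercised:

accelerate launch --config_file deepspeed_config.yaml test_checkpointing.py \
    --model_name_or_path bert-base-cased \
    --output_dir ./checkpoints \
    --num_epochs 2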
12
0
"""simple docstring""" import argparse import torch from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert from transformers.utils import logging logging.set_verbosity_info() def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): # Initialise PyTorch model __lowerCamelCase : Optional[int] = RemBertConfig.from_json_file(SCREAMING_SNAKE_CASE__ ) print('Building PyTorch model from configuration: {}'.format(str(SCREAMING_SNAKE_CASE__ ) ) ) __lowerCamelCase : str = RemBertModel(SCREAMING_SNAKE_CASE__ ) # Load weights from tf checkpoint load_tf_weights_in_rembert(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Save pytorch-model print('Save PyTorch model to {}'.format(SCREAMING_SNAKE_CASE__ ) ) torch.save(model.state_dict() , SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--rembert_config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained RemBERT model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) lowercase_ = parser.parse_args() convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
359
import d4rl  # noqa
import gym
import tqdm

from diffusers.experimental import ValueGuidedRLPipeline


config = {
    'n_samples': 64,
    'horizon': 32,
    'num_inference_steps': 20,
    'n_guide_steps': 2,  # can set to 0 for faster sampling, does not use value network
    'scale_grad_by_std': True,
    'scale': 0.1,
    'eta': 0.0,
    't_grad_cutoff': 2,
    'device': 'cpu',
}

if __name__ == "__main__":
    env_name = 'hopper-medium-v2'
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        'bglick13/hopper-medium-v2-value-function-hor32',
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
194
0
"""simple docstring""" from __future__ import annotations def UpperCamelCase ( UpperCAmelCase ) ->list: """simple docstring""" if len(UpperCAmelCase ) == 0: return [] a_ , a_ = min(UpperCAmelCase ), max(UpperCAmelCase ) a_ = int(max_value - min_value ) + 1 a_ = [[] for _ in range(UpperCAmelCase )] for i in my_list: buckets[int(i - min_value )].append(UpperCAmelCase ) return [v for bucket in buckets for v in sorted(UpperCAmelCase )] if __name__ == "__main__": from doctest import testmod testmod() assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
243
"""simple docstring""" from collections.abc import Callable from math import pi, sqrt from random import uniform from statistics import mean def UpperCamelCase ( UpperCAmelCase ) ->List[Any]: """simple docstring""" def is_in_circle(UpperCAmelCase , UpperCAmelCase ) -> bool: a_ = sqrt((x**2) + (y**2) ) # Our circle has a radius of 1, so a distance # greater than 1 would land outside the circle. return distance_from_centre <= 1 # The proportion of guesses that landed in the circle a_ = mean( int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) ) for _ in range(UpperCAmelCase ) ) # The ratio of the area for circle to square is pi/4. a_ = proportion * 4 print(F'''The estimated value of pi is {pi_estimate}''' ) print(F'''The numpy value of pi is {pi}''' ) print(F'''The total error is {abs(pi - pi_estimate )}''' ) def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 0.0 , UpperCAmelCase = 1.0 , ) ->float: """simple docstring""" return mean( function_to_integrate(uniform(UpperCAmelCase , UpperCAmelCase ) ) for _ in range(UpperCAmelCase ) ) * (max_value - min_value) def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase = 0.0 , UpperCAmelCase = 1.0 ) ->None: """simple docstring""" def identity_function(UpperCAmelCase ) -> float: return x a_ = area_under_curve_estimator( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) a_ = (max_value * max_value - min_value * min_value) / 2 print("******************" ) print(F'''Estimating area under y=x where x varies from {min_value} to {max_value}''' ) print(F'''Estimated value is {estimated_value}''' ) print(F'''Expected value is {expected_value}''' ) print(F'''Total error is {abs(estimated_value - expected_value )}''' ) print("******************" ) def UpperCamelCase ( UpperCAmelCase ) ->None: """simple docstring""" def function_to_integrate(UpperCAmelCase ) -> float: return sqrt(4.0 - x * x ) a_ = area_under_curve_estimator( UpperCAmelCase , UpperCAmelCase , 0.0 , 2.0 ) print("******************" ) print("Estimating pi using area_under_curve_estimator" ) print(F'''Estimated value is {estimated_value}''' ) print(F'''Expected value is {pi}''' ) print(F'''Total error is {abs(estimated_value - pi )}''' ) print("******************" ) if __name__ == "__main__": import doctest doctest.testmod()
243
1
'''simple docstring''' class _lowerCAmelCase : """simple docstring""" def __init__( self : Tuple , __snake_case : int , __snake_case : Union[str, Any] , __snake_case : Optional[int] )-> Dict: snake_case = None snake_case = None snake_case = graph self._normalize_graph(__snake_case , __snake_case ) snake_case = len(__snake_case ) snake_case = None def lowerCAmelCase ( self : Any , __snake_case : List[str] , __snake_case : Union[str, Any] )-> str: if sources is int: snake_case = [sources] if sinks is int: snake_case = [sinks] if len(__snake_case ) == 0 or len(__snake_case ) == 0: return snake_case = sources[0] snake_case = sinks[0] # make fake vertex if there are more # than one source or sink if len(__snake_case ) > 1 or len(__snake_case ) > 1: snake_case = 0 for i in sources: max_input_flow += sum(self.graph[i] ) snake_case = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: snake_case = max_input_flow snake_case = 0 snake_case = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: snake_case = max_input_flow snake_case = size - 1 def lowerCAmelCase ( self : Optional[int] )-> Tuple: if self.maximum_flow_algorithm is None: raise Exception("""You need to set maximum flow algorithm before.""" ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def lowerCAmelCase ( self : Dict , __snake_case : Optional[int] )-> Any: snake_case = algorithm(self ) class _lowerCAmelCase : """simple docstring""" def __init__( self : str , __snake_case : Any )-> Union[str, Any]: snake_case = flow_network snake_case = flow_network.verticesCount snake_case = flow_network.sourceIndex snake_case = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that snake_case = flow_network.graph snake_case = False def lowerCAmelCase ( self : List[Any] )-> Dict: if not self.executed: self._algorithm() snake_case = True def lowerCAmelCase ( self : List[Any] )-> int: pass class _lowerCAmelCase ( A__ ): """simple docstring""" def __init__( self : List[Any] , __snake_case : Tuple )-> Tuple: super().__init__(__snake_case ) # use this to save your result snake_case = -1 def lowerCAmelCase ( self : Any )-> Optional[int]: if not self.executed: raise Exception("""You should execute algorithm before using its result!""" ) return self.maximum_flow class _lowerCAmelCase ( A__ ): """simple docstring""" def __init__( self : Tuple , __snake_case : Tuple )-> Tuple: super().__init__(__snake_case ) snake_case = [[0] * self.verticies_count for i in range(self.verticies_count )] snake_case = [0] * self.verticies_count snake_case = [0] * self.verticies_count def lowerCAmelCase ( self : int )-> Union[str, Any]: snake_case = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule snake_case = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list snake_case = 0 while i < len(__snake_case ): snake_case = vertices_list[i] snake_case = self.heights[vertex_index] self.process_vertex(__snake_case ) if self.heights[vertex_index] > 
previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(__snake_case ) ) snake_case = 0 else: i += 1 snake_case = sum(self.preflow[self.source_index] ) def lowerCAmelCase ( self : List[Any] , __snake_case : Optional[Any] )-> str: while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(__snake_case , __snake_case ) self.relabel(__snake_case ) def lowerCAmelCase ( self : List[Any] , __snake_case : str , __snake_case : Optional[Any] )-> List[str]: snake_case = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def lowerCAmelCase ( self : Optional[Any] , __snake_case : Optional[int] )-> Tuple: snake_case = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): snake_case = self.heights[to_index] if min_height is not None: snake_case = min_height + 1 if __name__ == "__main__": _SCREAMING_SNAKE_CASE = [0] _SCREAMING_SNAKE_CASE = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] _SCREAMING_SNAKE_CASE = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network _SCREAMING_SNAKE_CASE = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate _SCREAMING_SNAKE_CASE = flow_network.find_maximum_flow() print(F"""maximum flow is {maximum_flow}""")
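As an independent cross-check of the push-relabel answer printed above, here is a compact Edmonds-Karp (BFS augmenting paths) max flow over the same 4-node capacity matrix; this sketch is mine, not part of the module:

from collections import deque


def edmonds_karp(capacity: list[list[int]], source: int, sink: int) -> int:
    n = len(capacity)
    flow = [[0] * n for _ in range(n)]
    max_flow = 0
    while True:
        # BFS for a shortest augmenting path in the residual graph
        parent = [-1] * n
        parent[source] = source
        queue = deque([source])
        while queue and parent[sink] == -1:
            u = queue.popleft()
            for v in range(n):
                if parent[v] == -1 and capacity[u][v] - flow[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:  # no augmenting path left
            return max_flow
        # find the bottleneck along the path, then push flow back along it
        bottleneck, v = float("inf"), sink
        while v != source:
            u = parent[v]
            bottleneck = min(bottleneck, capacity[u][v] - flow[u][v])
            v = u
        v = sink
        while v != source:
            u = parent[v]
            flow[u][v] += bottleneck
            flow[v][u] -= bottleneck
            v = u
        max_flow += bottleneck


print(edmonds_karp([[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]], 0, 3))  # 6, limited by edge 1 -> 2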
3
'''simple docstring''' import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed _SCREAMING_SNAKE_CASE = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""): from run_translation import main # noqa set_seed(42) _SCREAMING_SNAKE_CASE = "sshleifer/student_marian_en_ro_6_1" _SCREAMING_SNAKE_CASE = "sshleifer/tiny-mbart" @require_torch class _lowerCAmelCase ( A__ ): """simple docstring""" def lowerCAmelCase ( self : int , __snake_case : List[str]=False , __snake_case : List[Any]=None , __snake_case : Optional[int]=True , __snake_case : Any=True , __snake_case : int=True , __snake_case : Tuple=True , )-> Tuple: snake_case = self.run_trainer( eval_steps=1 , max_len=12 , model_name=__snake_case , num_train_epochs=1 , distributed=__snake_case , extra_args_str=__snake_case , predict_with_generate=__snake_case , do_train=__snake_case , do_eval=__snake_case , do_predict=__snake_case , ) snake_case = TrainerState.load_from_json(os.path.join(__snake_case , """trainer_state.json""" ) ).log_history if not do_eval: return snake_case = [log for log in logs if """eval_loss""" in log.keys()] snake_case = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats snake_case = eval_metrics[-1] assert isinstance(last_step_stats["""eval_bleu"""] , __snake_case ) assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def lowerCAmelCase ( self : Tuple )-> int: self.run_seqaseq_quick() @require_torch_multi_gpu def lowerCAmelCase ( self : Union[str, Any] )-> Dict: self.run_seqaseq_quick(distributed=__snake_case ) @require_torch_multi_gpu def lowerCAmelCase ( self : str )-> List[Any]: self.run_seqaseq_quick(distributed=__snake_case ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def lowerCAmelCase ( self : Any )-> Dict: self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--sharded_ddp simple""" ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def lowerCAmelCase ( self : int )-> Dict: self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--sharded_ddp simple --fp16""" ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def lowerCAmelCase ( self : int )-> str: self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=__snake_case ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def lowerCAmelCase ( self : Any )-> List[Any]: self.run_seqaseq_quick( distributed=__snake_case , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=__snake_case ) @require_apex @require_torch_gpu def lowerCAmelCase ( self : Tuple )-> Union[str, Any]: # XXX: apex breaks the trainer if 
it's run twice e.g. run_seq2seq.main() from the same # program and it breaks other tests that run from the same pytest worker, therefore until this is # sorted out it must be run only in an external program, that is distributed=True in this # test and only under one or more gpus - if we want cpu will need to make a special test # # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via # 2nd main() call it botches the future eval. # self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--fp16 --fp16_backend=apex""" ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--fp16 --fp16_backend=apex""" ) @parameterized.expand(["""base""", """low""", """high""", """mixed"""] ) @require_torch_multi_gpu def lowerCAmelCase ( self : List[str] , __snake_case : str )-> Optional[Any]: # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout snake_case = { # test with the default log_level - should be info and thus log info once """base""": {"""extra_args_str""": """""", """n_matches""": 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes """low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica """high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1}, # test with high log_level and log_level_replica - should be quiet on all processes """mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0}, } snake_case = experiments[experiment_id] snake_case = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False} snake_case = """Running training""" with CaptureStderr() as cl: self.run_seqaseq_quick(**__snake_case , extra_args_str=data["""extra_args_str"""] ) snake_case = len(re.findall(__snake_case , cl.err ) ) self.assertEqual(__snake_case , data["""n_matches"""] ) @slow def lowerCAmelCase ( self : Tuple )-> List[Any]: snake_case = self.run_trainer( eval_steps=2 , max_len=1_28 , model_name=__snake_case , learning_rate=3e-4 , num_train_epochs=10 , distributed=__snake_case , ) # Check metrics snake_case = TrainerState.load_from_json(os.path.join(__snake_case , """trainer_state.json""" ) ).log_history snake_case = [log for log in logs if """eval_loss""" in log.keys()] snake_case = eval_metrics[0] snake_case = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats["""eval_bleu"""] , __snake_case ) # test if do_predict saves generations and metrics snake_case = os.listdir(__snake_case ) snake_case = {os.path.basename(__snake_case ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def lowerCAmelCase ( self : str )-> Any: from transformers.training_args import OptimizerNames def train_and_return_metrics(__snake_case : str ) -> Tuple[int, float]: snake_case = """--skip_memory_metrics 0""" snake_case = self.run_trainer( max_len=1_28 , model_name=__snake_case , learning_rate=3e-4 , num_train_epochs=1 , optim=__snake_case , distributed=__snake_case , 
extra_args_str=__snake_case , do_eval=__snake_case , do_predict=__snake_case , n_gpus_to_use=1 , ) # Check metrics snake_case = TrainerState.load_from_json(Path(__snake_case , """trainer_state.json""" ) ).log_history snake_case = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 ) snake_case = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 ) snake_case = logs[0]["""train_loss"""] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss snake_case , snake_case , snake_case = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) snake_case , snake_case , snake_case = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) snake_case = gpu_alloc_mem_orig - gpu_alloc_mem_bnb snake_case = gpu_peak_mem_orig + gpu_alloc_mem_orig snake_case = gpu_peak_mem_bnb + gpu_alloc_mem_bnb snake_case = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as follows: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate differences between gpus let's check # that we have at least 120MB in savings snake_case = 1_20 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( __snake_case , __snake_case , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got""" f''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and''' f''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , ) self.assertGreater( __snake_case , __snake_case , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got""" f''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and''' f''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , ) self.assertEqual( __snake_case , __snake_case , f'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' ) def lowerCAmelCase ( self : int , __snake_case : int , __snake_case : str , __snake_case : int , __snake_case : float = 3e-3 , __snake_case : str = "adafactor" , __snake_case : bool = False , __snake_case : str = None , __snake_case : int = 0 , __snake_case : bool = True , __snake_case : bool = True , __snake_case : bool = True , __snake_case : bool = True , __snake_case : int = None , )-> Dict: snake_case = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro""" snake_case = self.get_auto_remove_tmp_dir() snake_case = f''' --model_name_or_path {model_name} --train_file {data_dir}/train.json --validation_file {data_dir}/val.json --test_file {data_dir}/test.json --output_dir {output_dir} --overwrite_output_dir --max_train_samples 8 --max_source_length {max_len} --max_target_length
{max_len} --do_train --num_train_epochs {str(__snake_case )} --per_device_train_batch_size 4 --learning_rate {learning_rate} --warmup_steps 8 --logging_steps 0 --logging_strategy no --save_steps {str(__snake_case )} --group_by_length --label_smoothing_factor 0.1 --target_lang ro_RO --source_lang en_XX '''.split() snake_case = f''' --do_eval --per_device_eval_batch_size 4 --max_eval_samples 8 --val_max_target_length {max_len} --evaluation_strategy steps --eval_steps {str(__snake_case )} '''.split() snake_case = """ --do_predict """.split() snake_case = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += f'''--optim {optim}'''.split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: snake_case = get_gpu_count() snake_case = get_torch_dist_unique_port() snake_case = f''' -m torch.distributed.run --nproc_per_node={n_gpus_to_use} --master_port={master_port} {self.examples_dir_str}/pytorch/translation/run_translation.py '''.split() snake_case = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(__snake_case , env=self.get_env() ) else: snake_case = ["""run_translation.py"""] + args with patch.object(__snake_case , """argv""" , __snake_case ): main() return output_dir
3
1
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: snake_case_ = None snake_case_ = logging.get_logger(__name__) snake_case_ = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} snake_case_ = { """vocab_file""": { """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""", """google/bigbird-roberta-large""": ( """https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model""" ), """google/bigbird-base-trivia-itc""": ( """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model""" ), }, """tokenizer_file""": { """google/bigbird-roberta-base""": ( """https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json""" ), """google/bigbird-roberta-large""": ( """https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json""" ), """google/bigbird-base-trivia-itc""": ( """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json""" ), }, } snake_case_ = { """google/bigbird-roberta-base""": 4096, """google/bigbird-roberta-large""": 4096, """google/bigbird-base-trivia-itc""": 4096, } snake_case_ = """▁""" class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = BigBirdTokenizer __UpperCamelCase = ["""input_ids""", """attention_mask"""] __UpperCamelCase = [] def __init__( self :List[Any] , lowercase_ :Optional[int]=None , lowercase_ :List[str]=None , lowercase_ :str="<unk>" , lowercase_ :int="<s>" , lowercase_ :Union[str, Any]="</s>" , lowercase_ :Optional[int]="<pad>" , lowercase_ :int="[SEP]" , lowercase_ :int="[MASK]" , lowercase_ :int="[CLS]" , **lowercase_ :Optional[int] , ) -> int: UpperCAmelCase = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else bos_token UpperCAmelCase = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else eos_token UpperCAmelCase = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else unk_token UpperCAmelCase = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else pad_token UpperCAmelCase = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else cls_token UpperCAmelCase = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it UpperCAmelCase = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else mask_token super().__init__( lowercase_ , tokenizer_file=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , **lowercase_ , ) UpperCAmelCase = vocab_file UpperCAmelCase = False if not self.vocab_file else True def UpperCAmelCase__ ( self :List[Any] , lowercase_ :List[int] , lowercase_ :Optional[List[int]] = None ) -> List[int]: UpperCAmelCase = [self.sep_token_id] UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def UpperCAmelCase__ ( self :Dict , lowercase_ :List[int] , lowercase_ :Optional[List[int]] = None , lowercase_ :bool = False ) -> List[int]: if already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(lowercase_ )) + [1] return [1] + ([0] * len(lowercase_ )) + [1] + ([0] * len(lowercase_ )) + [1] def UpperCAmelCase__ ( self :Tuple , lowercase_ :List[int] , lowercase_ :Optional[List[int]] = None ) -> List[int]: UpperCAmelCase = [self.sep_token_id] UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCAmelCase__ ( self :Dict , lowercase_ :str , lowercase_ :Optional[str] = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(lowercase_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return UpperCAmelCase = os.path.join( lowercase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ): copyfile(self.vocab_file , lowercase_ ) return (out_vocab_file,)
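# Editor's illustration (not part of the original dataset row): given the
# sequence-building and token-type methods above (build_inputs_with_special_tokens
# and create_token_type_ids_from_sequences in the upstream source), a single
# sequence is wrapped as [CLS] tokens_a [SEP] and a pair as
# [CLS] tokens_a [SEP] tokens_b [SEP]; token_type_ids are 0 for the first segment
# (including its surrounding special tokens) and 1 for the second. For example,
# with hypothetical ids cls_token_id=65 and sep_token_id=66:
#   build_inputs_with_special_tokens([10, 11])            -> [65, 10, 11, 66]
#   create_token_type_ids_from_sequences([10, 11], [12])  -> [0, 0, 0, 0, 1, 1]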
78
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = { """microsoft/beit-base-patch16-224-pt22k""": ( """https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json""" ), # See all BEiT models at https://huggingface.co/models?filter=beit } class A_ ( A__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ = """beit""" def __init__( self :Union[str, Any] , lowerCamelCase_ :Union[str, Any]=8_192 , lowerCamelCase_ :Dict=768 , lowerCamelCase_ :Dict=12 , lowerCamelCase_ :Optional[int]=12 , lowerCamelCase_ :str=3_072 , lowerCamelCase_ :List[str]="gelu" , lowerCamelCase_ :Union[str, Any]=0.0 , lowerCamelCase_ :Tuple=0.0 , lowerCamelCase_ :Optional[int]=0.02 , lowerCamelCase_ :int=1e-12 , lowerCamelCase_ :str=224 , lowerCamelCase_ :List[str]=16 , lowerCamelCase_ :Union[str, Any]=3 , lowerCamelCase_ :Optional[Any]=False , lowerCamelCase_ :Tuple=False , lowerCamelCase_ :str=False , lowerCamelCase_ :Tuple=False , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :str=True , lowerCamelCase_ :Tuple=[3, 5, 7, 11] , lowerCamelCase_ :str=[1, 2, 3, 6] , lowerCamelCase_ :Optional[int]=True , lowerCamelCase_ :Any=0.4 , lowerCamelCase_ :List[str]=256 , lowerCamelCase_ :Optional[Any]=1 , lowerCamelCase_ :List[Any]=False , lowerCamelCase_ :int=255 , **lowerCamelCase_ :List[str] , ): """simple docstring""" super().__init__(**lowerCamelCase_ ) lowerCamelCase__ : Any =vocab_size lowerCamelCase__ : List[str] =hidden_size lowerCamelCase__ : List[Any] =num_hidden_layers lowerCamelCase__ : Tuple =num_attention_heads lowerCamelCase__ : List[str] =intermediate_size lowerCamelCase__ : List[Any] =hidden_act lowerCamelCase__ : Tuple =hidden_dropout_prob lowerCamelCase__ : Optional[int] =attention_probs_dropout_prob lowerCamelCase__ : Optional[int] =initializer_range lowerCamelCase__ : List[str] =layer_norm_eps lowerCamelCase__ : Dict =image_size lowerCamelCase__ : int =patch_size lowerCamelCase__ : Union[str, Any] =num_channels lowerCamelCase__ : Tuple =use_mask_token lowerCamelCase__ : Any =use_absolute_position_embeddings lowerCamelCase__ : str =use_relative_position_bias lowerCamelCase__ : int =use_shared_relative_position_bias lowerCamelCase__ : Optional[int] =layer_scale_init_value lowerCamelCase__ : Any =drop_path_rate lowerCamelCase__ : Dict =use_mean_pooling # decode head attributes (semantic segmentation) lowerCamelCase__ : List[str] =out_indices lowerCamelCase__ : List[Any] =pool_scales # auxiliary head attributes (semantic segmentation) lowerCamelCase__ : Tuple =use_auxiliary_head lowerCamelCase__ : List[str] =auxiliary_loss_weight lowerCamelCase__ : Tuple =auxiliary_channels lowerCamelCase__ : Union[str, Any] =auxiliary_num_convs lowerCamelCase__ : Optional[Any] =auxiliary_concat_input lowerCamelCase__ : Optional[int] =semantic_loss_ignore_index class A_ ( A__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ = version.parse("""1.11""" ) @property def UpperCAmelCase__ ( self :List[str] ): """simple docstring""" return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def UpperCAmelCase__ ( self :Union[str, Any] ): """simple docstring""" return 1e-4
126
0
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline UpperCAmelCase = datasets.utils.logging.get_logger(__name__) @dataclass class __magic_name__ ( datasets.BuilderConfig ): __A : Optional[datasets.Features] = None __A : str = "utf-8" __A : Optional[str] = None __A : Optional[str] = None __A : bool = True # deprecated __A : Optional[int] = None # deprecated __A : int = 10 << 20 # 10MB __A : Optional[bool] = None class __magic_name__ ( datasets.ArrowBasedBuilder ): __A : int = JsonConfig def __snake_case ( self : Any ): '''simple docstring''' if self.config.block_size is not None: logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' ) lowercase :Any = self.config.block_size if self.config.use_threads is not True: logger.warning( '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' ) if self.config.newlines_in_values is not None: raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' ) return datasets.DatasetInfo(features=self.config.features ) def __snake_case ( self : str , snake_case__ : Any ): '''simple docstring''' if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) lowercase :Optional[Any] = dl_manager.download_and_extract(self.config.data_files ) if isinstance(snake_case__ , (str, list, tuple) ): lowercase :Optional[Any] = data_files if isinstance(snake_case__ , snake_case__ ): lowercase :Any = [files] lowercase :Any = [dl_manager.iter_files(snake_case__ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] lowercase :List[str] = [] for split_name, files in data_files.items(): if isinstance(snake_case__ , snake_case__ ): lowercase :Tuple = [files] lowercase :Union[str, Any] = [dl_manager.iter_files(snake_case__ ) for file in files] splits.append(datasets.SplitGenerator(name=snake_case__ , gen_kwargs={'''files''': files} ) ) return splits def __snake_case ( self : Any , snake_case__ : pa.Table ): '''simple docstring''' if self.config.features is not None: # adding missing columns for column_name in set(self.config.features ) - set(pa_table.column_names ): lowercase :List[Any] = self.config.features.arrow_schema.field(snake_case__ ).type lowercase :Dict = pa_table.append_column(snake_case__ , pa.array([None] * len(snake_case__ ) , type=snake_case__ ) ) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example lowercase :Dict = table_cast(snake_case__ , self.config.features.arrow_schema ) return pa_table def __snake_case ( self : Dict , snake_case__ : str ): '''simple docstring''' for file_idx, file in enumerate(itertools.chain.from_iterable(snake_case__ ) ): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(snake_case__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: lowercase :Any = json.load(snake_case__ ) # We keep only the field we are interested in lowercase :int = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(snake_case__ , (list, tuple) ): lowercase 
:Tuple = set().union(*[row.keys() for row in dataset] ) lowercase :Any = {col: [row.get(snake_case__ ) for row in dataset] for col in keys} else: lowercase :Any = dataset lowercase :Tuple = pa.Table.from_pydict(snake_case__ ) yield file_idx, self._cast_table(snake_case__ ) # If the file has one json object per line else: with open(snake_case__ , '''rb''' ) as f: lowercase :Optional[int] = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small lowercase :Optional[Any] = max(self.config.chunksize // 3_2 , 1_6 << 1_0 ) lowercase :str = ( self.config.encoding_errors if self.config.encoding_errors is not None else '''strict''' ) while True: lowercase :Union[str, Any] = f.read(self.config.chunksize ) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(snake_case__ ) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": lowercase :Tuple = batch.decode(self.config.encoding , errors=snake_case__ ).encode('''utf-8''' ) try: while True: try: lowercase :Tuple = paj.read_json( io.BytesIO(snake_case__ ) , read_options=paj.ReadOptions(block_size=snake_case__ ) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(snake_case__ , pa.ArrowInvalid ) and "straddling" not in str(snake_case__ ) or block_size > len(snake_case__ ) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( f"""Batch of {len(snake_case__ )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( snake_case__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: lowercase :Optional[Any] = json.load(snake_case__ ) except json.JSONDecodeError: logger.error(f"""Failed to read file '{file}' with error {type(snake_case__ )}: {e}""" ) raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(snake_case__ , snake_case__ ): # list is the only sequence type supported in JSON try: lowercase :Union[str, Any] = set().union(*[row.keys() for row in dataset] ) lowercase :Any = {col: [row.get(snake_case__ ) for row in dataset] for col in keys} lowercase :Tuple = pa.Table.from_pydict(snake_case__ ) except (pa.ArrowInvalid, AttributeError) as e: logger.error(f"""Failed to read file '{file}' with error {type(snake_case__ )}: {e}""" ) raise ValueError(f"""Not able to read records in the JSON file at {file}.""" ) from None yield file_idx, self._cast_table(snake_case__ ) break else: logger.error(f"""Failed to read file '{file}' with error {type(snake_case__ )}: {e}""" ) raise ValueError( f"""Not able to read records in the JSON file at {file}. """ f"""You should probably indicate the field of the JSON file containing your records. """ f"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """ f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(snake_case__ ) batch_idx += 1
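# Editor's illustration (not part of the original dataset row): with the default
# 10MB chunksize (10 << 20 bytes), the initial pyarrow block_size above is
# max((10 << 20) // 32, 16 << 10) = 327_680 bytes (~320KB). On a pa.ArrowInvalid
# "straddling object" error the block size is doubled (320KB -> 640KB -> 1.25MB
# -> ...) until the batch parses; any other error, or a block size already larger
# than the batch, is re-raised and handled by the JSON fallback path above.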
352
"""simple docstring""" from __future__ import annotations from collections.abc import Sequence from typing import Literal def lowerCamelCase (a_ :str , a_ :str) -> str | Literal[False]: lowercase :Union[str, Any] = list(a_) lowercase :Optional[Any] = list(a_) lowercase :str = 0 for i in range(len(a_)): if lista[i] != lista[i]: count += 1 lowercase :str = '''_''' if count > 1: return False else: return "".join(a_) def lowerCamelCase (a_ :list[str]) -> list[str]: lowercase :Optional[Any] = [] while True: lowercase :Tuple = ['''$'''] * len(a_) lowercase :Tuple = [] for i in range(len(a_)): for j in range(i + 1 , len(a_)): lowercase :Optional[int] = compare_string(binary[i] , binary[j]) if k is False: lowercase :Tuple = '''*''' lowercase :Any = '''*''' temp.append('''X''') for i in range(len(a_)): if checka[i] == "$": pi.append(binary[i]) if len(a_) == 0: return pi lowercase :str = list(set(a_)) def lowerCamelCase (a_ :int , a_ :Sequence[float]) -> list[str]: lowercase :Optional[int] = [] for minterm in minterms: lowercase :List[str] = '''''' for _ in range(a_): lowercase :List[str] = str(minterm % 2) + string minterm //= 2 temp.append(a_) return temp def lowerCamelCase (a_ :str , a_ :str , a_ :int) -> bool: lowercase :int = list(a_) lowercase :str = list(a_) lowercase :List[str] = 0 for i in range(len(a_)): if lista[i] != lista[i]: count_n += 1 return count_n == count def lowerCamelCase (a_ :list[list[int]] , a_ :list[str]) -> list[str]: lowercase :Any = [] lowercase :List[Any] = [0] * len(a_) for i in range(len(chart[0])): lowercase :List[Any] = 0 lowercase :int = -1 for j in range(len(a_)): if chart[j][i] == 1: count += 1 lowercase :List[Any] = j if count == 1: lowercase :Tuple = 1 for i in range(len(a_)): if select[i] == 1: for j in range(len(chart[0])): if chart[i][j] == 1: for k in range(len(a_)): lowercase :List[str] = 0 temp.append(prime_implicants[i]) while True: lowercase :Tuple = 0 lowercase :Dict = -1 lowercase :int = 0 for i in range(len(a_)): lowercase :List[Any] = chart[i].count(1) if count_n > max_n: lowercase :List[Any] = count_n lowercase :int = i if max_n == 0: return temp temp.append(prime_implicants[rem]) for i in range(len(chart[0])): if chart[rem][i] == 1: for j in range(len(a_)): lowercase :Tuple = 0 def lowerCamelCase (a_ :list[str] , a_ :list[str]) -> list[list[int]]: lowercase :Dict = [[0 for x in range(len(a_))] for x in range(len(a_))] for i in range(len(a_)): lowercase :Any = prime_implicants[i].count('''_''') for j in range(len(a_)): if is_for_table(prime_implicants[i] , binary[j] , a_): lowercase :int = 1 return chart def lowerCamelCase () -> None: lowercase :int = int(input('''Enter the no. of variables\n''')) lowercase :Tuple = [ float(a_) for x in input( '''Enter the decimal representation of Minterms \'Spaces Separated\'\n''').split() ] lowercase :Dict = decimal_to_binary(a_ , a_) lowercase :List[Any] = check(a_) print('''Prime Implicants are:''') print(a_) lowercase :Union[str, Any] = prime_implicant_chart(a_ , a_) lowercase :Dict = selection(a_ , a_) print('''Essential Prime Implicants are:''') print(a_) if __name__ == "__main__": import doctest doctest.testmod() main()
172
0
"""simple docstring""" from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __A (snake_case__): '''simple docstring''' __lowercase: Union[str, Any] = """ClapFeatureExtractor""" __lowercase: Tuple = ("""RobertaTokenizer""", """RobertaTokenizerFast""") def __init__( self : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int ) ->Optional[int]: """simple docstring""" super().__init__(UpperCAmelCase_ , UpperCAmelCase_ ) def __call__( self : Any , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Union[str, Any]=None , **UpperCAmelCase_ : Tuple ) ->Any: """simple docstring""" snake_case_ = kwargs.pop("""sampling_rate""" , UpperCAmelCase_ ) if text is None and audios is None: raise ValueError("""You have to specify either text or audios. Both cannot be none.""" ) if text is not None: snake_case_ = self.tokenizer(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ ) if audios is not None: snake_case_ = self.feature_extractor( UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ ) if text is not None and audios is not None: snake_case_ = audio_features.input_features return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCAmelCase_ ) , tensor_type=UpperCAmelCase_ ) def lowerCAmelCase ( self : str , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Tuple ) ->str: """simple docstring""" return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ ) def lowerCAmelCase ( self : List[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : int ) ->Optional[int]: """simple docstring""" return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ ) @property def lowerCAmelCase ( self : Any ) ->List[str]: """simple docstring""" snake_case_ = self.tokenizer.model_input_names snake_case_ = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
347
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING __SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__) class __A (snake_case__): '''simple docstring''' __lowercase: int = """upernet""" def __init__( self : str , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : str=512 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : Optional[Any]=[1, 2, 3, 6] , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Tuple=0.4 , UpperCAmelCase_ : Tuple=384 , UpperCAmelCase_ : Union[str, Any]=256 , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : Tuple=255 , **UpperCAmelCase_ : Dict , ) ->Union[str, Any]: """simple docstring""" super().__init__(**UpperCAmelCase_ ) if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) snake_case_ = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): snake_case_ = backbone_config.get("""model_type""" ) snake_case_ = CONFIG_MAPPING[backbone_model_type] snake_case_ = config_class.from_dict(UpperCAmelCase_ ) snake_case_ = backbone_config snake_case_ = hidden_size snake_case_ = initializer_range snake_case_ = pool_scales snake_case_ = use_auxiliary_head snake_case_ = auxiliary_loss_weight snake_case_ = auxiliary_in_channels snake_case_ = auxiliary_channels snake_case_ = auxiliary_num_convs snake_case_ = auxiliary_concat_input snake_case_ = loss_ignore_index def lowerCAmelCase ( self : str ) ->Optional[Any]: """simple docstring""" snake_case_ = copy.deepcopy(self.__dict__ ) snake_case_ = self.backbone_config.to_dict() snake_case_ = self.__class__.model_type return output
347
1
"""simple docstring""" import pytest from datasets.splits import SplitDict, SplitInfo from datasets.utils.py_utils import asdict @pytest.mark.parametrize( 'split_dict' , [ SplitDict(), SplitDict({'train': SplitInfo(name='train' , num_bytes=13_37 , num_examples=42 , dataset_name='my_dataset' )} ), SplitDict({'train': SplitInfo(name='train' , num_bytes=13_37 , num_examples=42 )} ), SplitDict({'train': SplitInfo()} ), ] , ) def lowercase__ ( _UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' lowercase : str = split_dict._to_yaml_list() assert len(_UpperCAmelCase ) == len(_UpperCAmelCase ) lowercase : List[Any] = SplitDict._from_yaml_list(_UpperCAmelCase ) for split_name, split_info in split_dict.items(): # dataset_name field is deprecated, and is therefore not part of the YAML dump lowercase : Dict = None # the split name of split_dict takes over the name of the split info object lowercase : Dict = split_name assert split_dict == reloaded @pytest.mark.parametrize( 'split_info' , [SplitInfo(), SplitInfo(dataset_name=_UpperCAmelCase ), SplitInfo(dataset_name='my_dataset' )] ) def lowercase__ ( _UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' lowercase : Dict = asdict(SplitDict({'train': split_info} ) ) assert "dataset_name" in split_dict_asdict["train"] assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
354
"""simple docstring""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging _UpperCamelCase: Optional[int] = logging.get_logger(__name__) _UpperCamelCase: Union[str, Any] = { 'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class a__ ( SCREAMING_SNAKE_CASE__ ): _lowerCamelCase = 'gpt_neo' _lowerCamelCase = ['past_key_values'] _lowerCamelCase = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'} def __init__( self : Optional[Any], lowerCAmelCase : int=50257, lowerCAmelCase : Tuple=2048, lowerCAmelCase : int=2048, lowerCAmelCase : Tuple=24, lowerCAmelCase : Optional[Any]=[[["global", "local"], 12]], lowerCAmelCase : Optional[int]=16, lowerCAmelCase : Optional[Any]=None, lowerCAmelCase : Dict=256, lowerCAmelCase : Optional[int]="gelu_new", lowerCAmelCase : Any=0.0, lowerCAmelCase : Dict=0.0, lowerCAmelCase : Optional[Any]=0.0, lowerCAmelCase : Dict=0.1, lowerCAmelCase : List[Any]=1e-5, lowerCAmelCase : Optional[Any]=0.02, lowerCAmelCase : Dict=True, lowerCAmelCase : int=50256, lowerCAmelCase : Optional[Any]=50256, **lowerCAmelCase : Any, ) -> Optional[Any]: lowercase : List[Any] = vocab_size lowercase : Optional[Any] = max_position_embeddings lowercase : Dict = hidden_size lowercase : Optional[Any] = num_layers lowercase : str = num_heads lowercase : Optional[int] = intermediate_size lowercase : List[str] = window_size lowercase : Dict = activation_function lowercase : Dict = resid_dropout lowercase : int = embed_dropout lowercase : Optional[Any] = attention_dropout lowercase : Tuple = classifier_dropout lowercase : Optional[int] = layer_norm_epsilon lowercase : Dict = initializer_range lowercase : Optional[Any] = use_cache lowercase : Union[str, Any] = bos_token_id lowercase : int = eos_token_id lowercase : str = attention_types lowercase : int = self.expand_attention_types_params(lowerCAmelCase ) if len(self.attention_layers ) != self.num_layers: raise ValueError( 'Configuration for convolutional module is incorrect. ' 'It is required that `len(config.attention_layers)` == `config.num_layers` ' f'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, ''' f'''`config.num_layers = {self.num_layers}`. ''' '`config.attention_layers` is prepared using `config.attention_types`. ' 'Please verify the value of `config.attention_types` argument.' 
) super().__init__(bos_token_id=lowerCAmelCase, eos_token_id=lowerCAmelCase, **lowerCAmelCase ) @staticmethod def lowercase ( lowerCAmelCase : str ) -> Optional[Any]: lowercase : Dict = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> int: '''simple docstring''' import torch lowercase : Dict = input.size() lowercase : Optional[int] = len(_UpperCAmelCase ) lowercase : str = shape[dimension] lowercase : Optional[Any] = torch.arange(0 , _UpperCAmelCase , _UpperCAmelCase ) lowercase : List[str] = torch.div(sizedim - size , _UpperCAmelCase , rounding_mode='floor' ) + 1 lowercase : Any = torch.arange(_UpperCAmelCase ) + low_indices[:min_length][:, None] lowercase : List[Any] = [slice(_UpperCAmelCase )] * rank lowercase : int = indices lowercase : Optional[Any] = input[s] lowercase : str = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(_UpperCAmelCase ) def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> Any: '''simple docstring''' import torch lowercase : int = torch.arange(1 , _UpperCAmelCase ) lowercase : List[str] = torch.remainder(_UpperCAmelCase , _UpperCAmelCase ) lowercase : Optional[int] = remainders == 0 lowercase : Tuple = candidates[divisor_indices] lowercase : Any = torch.max(_UpperCAmelCase ) return largest_divisor, torch.div(_UpperCAmelCase , _UpperCAmelCase , rounding_mode='floor' ) class a__ ( SCREAMING_SNAKE_CASE__ ): @property def lowercase ( self : int ) -> Mapping[str, Mapping[int, str]]: lowercase : str = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: self.fill_with_past_key_values_(lowerCAmelCase, direction='inputs' ) lowercase : Dict = {0: 'batch', 1: 'past_sequence + sequence'} else: lowercase : List[str] = {0: 'batch', 1: 'sequence'} return common_inputs @property def lowercase ( self : int ) -> int: return self._config.num_heads def lowercase ( self : Tuple, lowerCAmelCase : PreTrainedTokenizer, lowerCAmelCase : int = -1, lowerCAmelCase : int = -1, lowerCAmelCase : bool = False, lowerCAmelCase : Optional[TensorType] = None, ) -> Mapping[str, Any]: lowercase : Union[str, Any] = super(lowerCAmelCase, self ).generate_dummy_inputs( lowerCAmelCase, batch_size=lowerCAmelCase, seq_length=lowerCAmelCase, is_pair=lowerCAmelCase, framework=lowerCAmelCase ) # We need to order the input in the way they appears in the forward() lowercase : int = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch lowercase , lowercase : str = common_inputs['input_ids'].shape # Not using the same length for past_key_values lowercase : Tuple = seqlen + 2 lowercase : Tuple = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) lowercase : Any = [ (torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) for _ in range(self.num_layers ) ] lowercase : Optional[int] = common_inputs['attention_mask'] if self.use_past: lowercase : Optional[int] = ordered_inputs['attention_mask'].dtype lowercase : Dict = torch.cat( [ordered_inputs['attention_mask'], torch.ones(lowerCAmelCase, lowerCAmelCase, dtype=lowerCAmelCase )], dim=1 ) return ordered_inputs @property def lowercase ( self : int ) -> int: return 13
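# Editor's illustration (not part of the original dataset row): a self-contained
# sketch of what expand_attention_types_params computes for the default config,
# assuming the upstream GPT-Neo semantics of the static method above.
attention_types = [[["global", "local"], 12]]
expanded = []
for pattern, repeat in attention_types:
    for _ in range(repeat):
        expanded.extend(pattern)  # alternating global/local, one entry per layer
assert len(expanded) == 24 and expanded[:4] == ["global", "local", "global", "local"]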
53
0
'''simple docstring''' import argparse import torch from transformers import ( EncodecConfig, EncodecFeatureExtractor, EncodecModel, logging, ) # checkpoints downloaded from: # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th logging.set_verbosity_info() a_ : List[str] = logging.get_logger("""transformers.models.encodec""") a_ : List[Any] = { """quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""", """quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""", """quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""", """quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""", } a_ : int = { """encoder.model.0.conv.conv""": """encoder.layers.0.conv""", """encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""", """encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""", """encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""", """encoder.model.3.conv.conv""": """encoder.layers.3.conv""", """encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""", """encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""", """encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""", """encoder.model.6.conv.conv""": """encoder.layers.6.conv""", """encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""", """encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""", """encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""", """encoder.model.9.conv.conv""": """encoder.layers.9.conv""", """encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""", """encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""", """encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""", """encoder.model.12.conv.conv""": """encoder.layers.12.conv""", """encoder.model.13.lstm""": """encoder.layers.13.lstm""", """encoder.model.15.conv.conv""": """encoder.layers.15.conv""", } a_ : Tuple = { """encoder.model.0.conv.norm""": """encoder.layers.0.norm""", """encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""", """encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""", """encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""", """encoder.model.3.conv.norm""": """encoder.layers.3.norm""", """encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""", """encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""", """encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""", """encoder.model.6.conv.norm""": """encoder.layers.6.norm""", """encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""", """encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""", """encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""", """encoder.model.9.conv.norm""": """encoder.layers.9.norm""", """encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""", """encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""", """encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""", """encoder.model.12.conv.norm""": 
"""encoder.layers.12.norm""", """encoder.model.15.conv.norm""": """encoder.layers.15.norm""", } a_ : Dict = { """decoder.model.0.conv.conv""": """decoder.layers.0.conv""", """decoder.model.1.lstm""": """decoder.layers.1.lstm""", """decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""", """decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""", """decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""", """decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""", """decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""", """decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""", """decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""", """decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""", """decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""", """decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""", """decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""", """decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""", """decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""", """decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""", """decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""", """decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""", """decoder.model.15.conv.conv""": """decoder.layers.15.conv""", } a_ : Union[str, Any] = { """decoder.model.0.conv.norm""": """decoder.layers.0.norm""", """decoder.model.3.convtr.norm""": """decoder.layers.3.norm""", """decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""", """decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""", """decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""", """decoder.model.6.convtr.norm""": """decoder.layers.6.norm""", """decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""", """decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""", """decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""", """decoder.model.9.convtr.norm""": """decoder.layers.9.norm""", """decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""", """decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""", """decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""", """decoder.model.12.convtr.norm""": """decoder.layers.12.norm""", """decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""", """decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""", """decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""", """decoder.model.15.conv.norm""": """decoder.layers.15.norm""", } a_ : Any = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_DECODER, } a_ : str = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_ENCODER_48K, **MAPPING_DECODER, **MAPPING_DECODER_48K, } a_ : int = [] a_ : str = [] def __snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] ): for attribute in key.split("." 
): lowerCamelCase_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if weight_type is not None: lowerCamelCase_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape else: lowerCamelCase_ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": lowerCamelCase_ = value elif weight_type == "weight_g": lowerCamelCase_ = value elif weight_type == "weight_v": lowerCamelCase_ = value elif weight_type == "bias": lowerCamelCase_ = value elif weight_type == "running_mean": lowerCamelCase_ = value elif weight_type == "running_var": lowerCamelCase_ = value elif weight_type == "num_batches_tracked": lowerCamelCase_ = value elif weight_type == "weight_ih_l0": lowerCamelCase_ = value elif weight_type == "weight_hh_l0": lowerCamelCase_ = value elif weight_type == "bias_ih_l0": lowerCamelCase_ = value elif weight_type == "bias_hh_l0": lowerCamelCase_ = value elif weight_type == "weight_ih_l1": lowerCamelCase_ = value elif weight_type == "weight_hh_l1": lowerCamelCase_ = value elif weight_type == "bias_ih_l1": lowerCamelCase_ = value elif weight_type == "bias_hh_l1": lowerCamelCase_ = value else: lowerCamelCase_ = value logger.info(F'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' ) def __snake_case ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] ): for key in ignore_keys: if key.endswith(".*" ): if name.startswith(key[:-1] ): return True elif ".*." in key: lowerCamelCase_ = key.split(".*." ) if prefix in name and suffix in name: return True elif key in name: return True return False def __snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] ): lowerCamelCase_ = [] if model_name in ("encodec_24khz", "encodec_32khz"): lowerCamelCase_ = MAPPING_24K elif model_name == "encodec_48khz": lowerCamelCase_ = MAPPING_48K else: raise ValueError(F'''Unsupported model: {model_name}''' ) for name, value in orig_dict.items(): if should_ignore(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): logger.info(F'''{name} was ignored''' ) continue lowerCamelCase_ = False for key, mapped_key in MAPPING.items(): if "*" in key: lowerCamelCase_ = key.split(".*." ) if prefix in name and suffix in name: lowerCamelCase_ = suffix if key in name: # HACK otherwise .embed gets initialized with .embed_avg too if key.endswith("embed" ) and name.endswith("embed_avg" ): continue lowerCamelCase_ = True if "*" in mapped_key: lowerCamelCase_ = name.split(_SCREAMING_SNAKE_CASE )[0].split("."
)[-2] lowerCamelCase_ = mapped_key.replace("*" , _SCREAMING_SNAKE_CASE ) if "weight_g" in name: lowerCamelCase_ = 'weight_g' elif "weight_v" in name: lowerCamelCase_ = 'weight_v' elif "weight_ih_l0" in name: lowerCamelCase_ = 'weight_ih_l0' elif "weight_hh_l0" in name: lowerCamelCase_ = 'weight_hh_l0' elif "bias_ih_l0" in name: lowerCamelCase_ = 'bias_ih_l0' elif "bias_hh_l0" in name: lowerCamelCase_ = 'bias_hh_l0' elif "weight_ih_l1" in name: lowerCamelCase_ = 'weight_ih_l1' elif "weight_hh_l1" in name: lowerCamelCase_ = 'weight_hh_l1' elif "bias_ih_l1" in name: lowerCamelCase_ = 'bias_ih_l1' elif "bias_hh_l1" in name: lowerCamelCase_ = 'bias_hh_l1' elif "bias" in name: lowerCamelCase_ = 'bias' elif "weight" in name: lowerCamelCase_ = 'weight' elif "running_mean" in name: lowerCamelCase_ = 'running_mean' elif "running_var" in name: lowerCamelCase_ = 'running_var' elif "num_batches_tracked" in name: lowerCamelCase_ = 'num_batches_tracked' else: lowerCamelCase_ = None set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) continue if not is_used: unused_weights.append(_SCREAMING_SNAKE_CASE ) logger.warning(F'''Unused weights: {unused_weights}''' ) @torch.no_grad() def __snake_case ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str=None , UpperCAmelCase_ : int=None , ): if config_path is not None: lowerCamelCase_ = EncodecConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) else: lowerCamelCase_ = EncodecConfig() if model_name == "encodec_24khz": pass # config is already correct elif model_name == "encodec_32khz": lowerCamelCase_ = [8, 5, 4, 4] lowerCamelCase_ = [2.2] lowerCamelCase_ = 64 lowerCamelCase_ = 32000 lowerCamelCase_ = 2048 lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False elif model_name == "encodec_48khz": lowerCamelCase_ = [8, 5, 4, 2] lowerCamelCase_ = [3.0, 6.0, 12.0, 24.0] lowerCamelCase_ = 48000 lowerCamelCase_ = 2 lowerCamelCase_ = False lowerCamelCase_ = 'time_group_norm' lowerCamelCase_ = True lowerCamelCase_ = 1.0 lowerCamelCase_ = 0.01 else: raise ValueError(F'''Unknown model name: {model_name}''' ) lowerCamelCase_ = EncodecModel(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ = EncodecFeatureExtractor( feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , ) feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ = torch.load(_SCREAMING_SNAKE_CASE ) if "best_state" in original_checkpoint: # we might have a training state saved, in which case discard the yaml results and just retain the weights lowerCamelCase_ = original_checkpoint['best_state'] recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) if repo_id: print("Pushing to the hub..." ) feature_extractor.push_to_hub(_SCREAMING_SNAKE_CASE ) model.push_to_hub(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": a_ : List[Any] = argparse.ArgumentParser() parser.add_argument( """--model""", default="""encodec_24khz""", type=str, help="""The model to convert. 
Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.""", ) parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub.""" ) a_ : Optional[int] = parser.parse_args() convert_checkpoint( args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
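# Editor's note (not part of the original dataset row), example invocation with
# hypothetical file names (the flags follow the argparse definition above; the
# script name is assumed):
#   python convert_encodec_checkpoint.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec-24khz-hf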
55
"""simple docstring""" def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Union[str, Any]: if height >= 1: move_tower(height - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) move_disk(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) move_tower(height - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Union[str, Any]: print('moving disk from' , _SCREAMING_SNAKE_CASE , 'to' , _SCREAMING_SNAKE_CASE ) def __a ( ) ->List[str]: a__: Dict = int(input('Height of hanoi: ' ).strip() ) move_tower(_SCREAMING_SNAKE_CASE , 'A' , 'B' , 'C' ) if __name__ == "__main__": main()
290
0
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase : Optional[int] = logging.get_logger(__name__) UpperCamelCase : int = { """facebook/deit-base-distilled-patch16-224""": ( """https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json""" ), # See all DeiT models at https://huggingface.co/models?filter=deit } class UpperCamelCase ( a_ ): """simple docstring""" A : int = "deit" def __init__( self : Any , UpperCAmelCase_ : Optional[int]=7_6_8 , UpperCAmelCase_ : Tuple=1_2 , UpperCAmelCase_ : List[str]=1_2 , UpperCAmelCase_ : Union[str, Any]=3_0_7_2 , UpperCAmelCase_ : int="gelu" , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : List[Any]=0.0 , UpperCAmelCase_ : Dict=0.02 , UpperCAmelCase_ : Dict=1e-12 , UpperCAmelCase_ : str=2_2_4 , UpperCAmelCase_ : List[str]=1_6 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Optional[Any]=1_6 , **UpperCAmelCase_ : Optional[Any] , ): """simple docstring""" super().__init__(**UpperCAmelCase_) a : List[Any] = hidden_size a : Union[str, Any] = num_hidden_layers a : Optional[Any] = num_attention_heads a : Union[str, Any] = intermediate_size a : Optional[Any] = hidden_act a : str = hidden_dropout_prob a : Union[str, Any] = attention_probs_dropout_prob a : Dict = initializer_range a : Optional[Any] = layer_norm_eps a : Dict = image_size a : int = patch_size a : List[Any] = num_channels a : Optional[int] = qkv_bias a : str = encoder_stride class UpperCamelCase ( a_ ): """simple docstring""" A : Tuple = version.parse("1.11" ) @property def SCREAMING_SNAKE_CASE_ ( self : int): """simple docstring""" return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ]) @property def SCREAMING_SNAKE_CASE_ ( self : str): """simple docstring""" return 1e-4
351
'''simple docstring''' from __future__ import annotations def SCREAMING_SNAKE_CASE__ ( snake_case : list[int | float] , snake_case : int , snake_case : int ) -> int | float: """simple docstring""" if len(snake_case ) == 0: raise ValueError('find_max() arg is an empty sequence' ) if ( left >= len(snake_case ) or left < -len(snake_case ) or right >= len(snake_case ) or right < -len(snake_case ) ): raise IndexError('list index out of range' ) if left == right: return nums[left] a : Union[str, Any] = (left + right) >> 1 # the middle a : List[str] = find_max(snake_case , snake_case , snake_case ) # find max in range[left, mid] a : Dict = find_max(snake_case , mid + 1 , snake_case ) # find max in range[mid + 1, right] return left_max if left_max >= right_max else right_max if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
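# Editor's illustration (not part of the original dataset row): an unmangled,
# self-contained sketch of the same divide-and-conquer recurrence.
def _find_max(nums, left, right):
    # Max of nums[left..right] inclusive, splitting at the midpoint.
    if left == right:
        return nums[left]
    mid = (left + right) >> 1
    return max(_find_max(nums, left, mid), _find_max(nums, mid + 1, right))

assert _find_max([1, 3, 2], 0, 2) == 3  # valid indices lie in [-len(nums), len(nums))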
345
0
import argparse
from pathlib import Path

import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version

from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging


if version.parse(fairseq.__version__) < version.parse("0.12.2"):
    raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
    raise Exception("requires fairseq < v2")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"


def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias

        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xmod_checkpoint_to_pytorch(
        args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
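The whole conversion script stands or falls on its final equivalence check: the ported weights must reproduce the fairseq model's outputs within a small tolerance. A self-contained sketch of that check, with hypothetical random tensors standing in for the two models' logits (the 1e-3 tolerance mirrors the one used above):

import torch


def outputs_match(ours: torch.Tensor, theirs: torch.Tensor, atol: float = 1e-3) -> bool:
    """Print the max deviation and return True when both tensors agree within atol."""
    max_absolute_diff = torch.max(torch.abs(ours - theirs)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")
    return torch.allclose(ours, theirs, atol=atol)


ours = torch.randn(1, 8, 250_002)  # hypothetical logits from the converted model
theirs = ours + 1e-5 * torch.randn_like(ours)  # hypothetical logits from the original
assert outputs_match(ours, theirs)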
188
from ..utils import DummyObject, requires_backends class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : List[Any] = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> List[str]: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : Dict = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> List[str]: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : List[str] = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> Union[str, Any]: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : Dict = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : Dict = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> str: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : List[Any] = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : int = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> Dict: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str: """simple docstring""" 
requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : List[Any] = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> int: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : List[Any] = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> Tuple: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : List[str] = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : List[Any] = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> Dict: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]: """simple docstring""" requires_backends(cls, ['''torch'''] ) def UpperCAmelCase__ ( *_A : Optional[Any] , **_A : Optional[Any] ): '''simple docstring''' requires_backends(_A , ['''torch'''] ) def UpperCAmelCase__ ( *_A : Union[str, Any] , **_A : List[Any] ): '''simple docstring''' requires_backends(_A , ['''torch'''] ) def UpperCAmelCase__ ( *_A : Union[str, Any] , **_A : Tuple ): '''simple docstring''' requires_backends(_A , ['''torch'''] ) def UpperCAmelCase__ ( *_A : List[str] , **_A : List[str] ): '''simple docstring''' requires_backends(_A , ['''torch'''] ) def UpperCAmelCase__ ( *_A : Dict , **_A : Dict ): '''simple docstring''' requires_backends(_A , ['''torch'''] ) def UpperCAmelCase__ ( *_A : List[str] , **_A : str ): '''simple docstring''' requires_backends(_A , ['''torch'''] ) def UpperCAmelCase__ ( *_A : Optional[int] , **_A : Dict ): '''simple docstring''' requires_backends(_A , ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : List[Any] = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]: """simple docstring""" requires_backends(cls, ['''torch'''] ) 
@classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> Any: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : str = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> List[str]: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : Tuple = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> List[str]: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : Tuple = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> Tuple: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : Dict = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> Optional[int]: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : List[str] = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> Any: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : Any = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> int: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, 
Any]: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : List[str] = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> Dict: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : str = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : int = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> List[str]: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> Union[str, Any]: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> Optional[int]: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> Dict: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : str = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]: 
"""simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : int = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> List[str]: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : Dict = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> Tuple: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> Any: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> Dict: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( 
metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : Tuple = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> Tuple: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : int = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : List[Any] = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : Dict = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> int: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : str = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> Any: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : str = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> List[str]: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, 
**lowercase_ ) -> List[Any]: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : Dict = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : List[Any] = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> int: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> str: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : Dict = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> Any: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : Any = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> int: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : int = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> Union[str, Any]: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> Optional[int]: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple: """simple docstring""" 
requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : Optional[int] = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> Tuple: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> Optional[int]: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str: """simple docstring""" requires_backends(cls, ['''torch'''] ) class __magic_name__ ( metaclass=lowerCamelCase__ ): '''simple docstring''' lowerCamelCase__ : int = ['torch'] def __init__( self, *lowercase_, **lowercase_ ) -> Tuple: """simple docstring""" requires_backends(self, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple: """simple docstring""" requires_backends(cls, ['''torch'''] ) @classmethod def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str: """simple docstring""" requires_backends(cls, ['''torch'''] )
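Every stub class in this file follows the same three-method template, so the mechanism is easier to see in isolation. Below is a simplified sketch of how `DummyObject` and `requires_backends` cooperate; the real implementations in `transformers.utils`/`diffusers.utils` handle more special cases, and the class name in the sketch is a hypothetical placeholder:

import importlib.util


def requires_backends(obj, backends):
    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
    missing = [backend for backend in backends if importlib.util.find_spec(backend) is None]
    if missing:
        raise ImportError(f"{name} requires the {missing} backend(s), which are not installed.")


class DummyObject(type):
    """Metaclass: any public attribute access on the class itself re-checks the backends first."""

    def __getattribute__(cls, key):
        if key.startswith("_"):
            return super().__getattribute__(key)
        requires_backends(cls, cls._backends)
        return super().__getattribute__(key)


class SomeTorchOnlyPipeline(metaclass=DummyObject):  # hypothetical placeholder name
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])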
188
1
from __future__ import annotations

from math import ceil, floor, sqrt


def solution(target: int = 2000000) -> int:
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"{solution() = }")
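The search above leans on the identity that a grid measuring a by b contains T(a) * T(b) = a(a+1)/2 * b(b+1)/2 rectangles, and the quadratic-formula line simply inverts T(b) ~ target / T(a). A brute-force cross-check of that identity on small grids (my own verification sketch, not part of the original solution):

def count_rectangles_brute_force(width: int, height: int) -> int:
    # a rectangle is a choice of two distinct vertical and two distinct horizontal grid lines
    count = 0
    for x1 in range(width):
        for x2 in range(x1 + 1, width + 1):
            for y1 in range(height):
                for y2 in range(y1 + 1, height + 1):
                    count += 1
    return count


def count_rectangles_closed_form(width: int, height: int) -> int:
    return (width * (width + 1) // 2) * (height * (height + 1) // 2)


assert count_rectangles_closed_form(3, 2) == 18  # the worked example from Project Euler problem 85
for w in range(1, 7):
    for h in range(1, 7):
        assert count_rectangles_brute_force(w, h) == count_rectangles_closed_form(w, h)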
352
from __future__ import annotations

from typing import TypedDict


class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int."
        )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than len(bwt_string)."
        )

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
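A quick non-interactive round trip with the classic "^BANANA" example (my own usage sketch) confirms the transform is invertible as long as the original rotation index is kept alongside the transformed string:

result = bwt_transform("^BANANA")
print(result["bwt_string"])  # BNN^AAA
restored = reverse_bwt(result["bwt_string"], result["idx_original_string"])
assert restored == "^BANANA"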
224
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
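The `_LazyModule` indirection defers the heavyweight submodule imports until a symbol is first accessed. This is a stripped-down sketch of the idea, not the actual `transformers` implementation (which additionally handles module specs, pickling, and error caching):

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported symbol back to the submodule that defines it
        self._symbol_to_module = {
            symbol: submodule for submodule, symbols in import_structure.items() for symbol in symbols
        }
        self.__all__ = list(self._symbol_to_module)

    def __getattr__(self, symbol):
        submodule_name = self._symbol_to_module.get(symbol)
        if submodule_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {symbol!r}")
        submodule = importlib.import_module(f"{self.__name__}.{submodule_name}")
        value = getattr(submodule, symbol)
        setattr(self, symbol, value)  # cache so the import only happens once
        return value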
59
import gc
import unittest

from parameterized import parameterized

from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow


if is_flax_available():
    import jax
    import jax.numpy as jnp


@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
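`parameterized.expand` turns each argument tuple into its own generated test method, which is why one decorated function covers four seed/timestep/slice combinations above. A minimal, self-contained illustration with a hypothetical arithmetic test of my own:

import unittest

from parameterized import parameterized


class SquareTests(unittest.TestCase):
    @parameterized.expand(
        [
            (2, 4),
            (3, 9),
            (10, 100),
        ]
    )
    def test_square(self, base, expected):
        # one test method is generated per tuple, so failures are reported individually
        self.assertEqual(base * base, expected)


if __name__ == "__main__":
    unittest.main()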
59
1
'''simple docstring''' import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 snake_case__ = sys.version_info >= (3, 10) def snake_case__ ( lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : str=None ) -> List[Any]: return field(default_factory=lambda: default , metadata=lowerCamelCase__ ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 4_2 _lowerCAmelCase = field(default='toto', metadata={'help': 'help message'} ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = False _lowerCAmelCase = True _lowerCAmelCase = None class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'titi' _lowerCAmelCase = 'toto' class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'titi' _lowerCAmelCase = 'toto' _lowerCAmelCase = 4_2 @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = "toto" def _a ( self : Optional[Any] ): """simple docstring""" A_ : Optional[int] = BasicEnum(self.foo ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = "toto" def _a ( self : Tuple ): """simple docstring""" A_ : Optional[Any] = MixedTypeEnum(self.foo ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = None _lowerCAmelCase = field(default=a__, metadata={'help': 'help message'} ) _lowerCAmelCase = None _lowerCAmelCase = list_field(default=[] ) _lowerCAmelCase = list_field(default=[] ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = list_field(default=[] ) _lowerCAmelCase = list_field(default=[1, 2, 3] ) _lowerCAmelCase = list_field(default=['Hallo', 'Bonjour', 'Hello'] ) _lowerCAmelCase = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = field() _lowerCAmelCase = field() _lowerCAmelCase = field() def _a ( self : Tuple ): """simple docstring""" A_ : Tuple = BasicEnum(self.required_enum ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = field() _lowerCAmelCase = None _lowerCAmelCase = field(default='toto', metadata={'help': 'help message'} ) _lowerCAmelCase = list_field(default=['Hallo', 'Bonjour', 'Hello'] ) if is_python_no_less_than_3_10: @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = False _lowerCAmelCase = True _lowerCAmelCase = None @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = None _lowerCAmelCase = field(default=a__, metadata={'help': 'help message'} ) _lowerCAmelCase = None _lowerCAmelCase = list_field(default=[] ) _lowerCAmelCase = list_field(default=[] ) class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : List[str] , _lowerCamelCase : argparse.ArgumentParser , _lowerCamelCase : argparse.ArgumentParser ): """simple docstring""" self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): A_ : Union[str, 
Any] = {k: v for k, v in vars(_lowerCamelCase ).items() if k != '''container'''} A_ : Optional[Any] = {k: v for k, v in vars(_lowerCamelCase ).items() if k != '''container'''} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('''choices''' , _lowerCamelCase ) and yy.get('''choices''' , _lowerCamelCase ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['''type'''](_lowerCamelCase ) , yy['''type'''](_lowerCamelCase ) ) del xx["type"], yy["type"] self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Optional[int] ): """simple docstring""" A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase ) A_ : Optional[Any] = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--bar''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--baz''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--flag''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : Union[str, Any] = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5'''] ((A_) ,) : List[str] = parser.parse_args_into_dataclasses(_lowerCamelCase , look_for_args_file=_lowerCamelCase ) self.assertFalse(example.flag ) def _a ( self : Dict ): """simple docstring""" A_ : int = HfArgumentParser(_lowerCamelCase ) A_ : int = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=42 , type=_lowerCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=_lowerCamelCase , help='''help message''' ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Dict ): """simple docstring""" A_ : Any = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' ) expected.add_argument('''--baz''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('''--no_baz''' , action='''store_false''' , default=_lowerCamelCase , dest='''baz''' ) expected.add_argument('''--opt''' , type=_lowerCamelCase , default=_lowerCamelCase ) A_ : Dict = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(_lowerCamelCase ) for dataclass_type in dataclass_types: A_ : Any = HfArgumentParser(_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : List[Any] = parser.parse_args([] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : Optional[int] = parser.parse_args(['''--foo''', '''--no_baz'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : Union[str, Any] = parser.parse_args(['''--foo''', '''--baz'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : List[str] = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : List[Any] = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', 
'''--opt''', '''False'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) def _a ( self : List[Any] ): """simple docstring""" A_ : str = HfArgumentParser(_lowerCamelCase ) A_ : Optional[int] = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : str = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) A_ : List[Any] = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) A_ : int = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) A_ : Dict = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) A_ : Tuple = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 42 ) A_ : List[str] = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def _a ( self : Optional[int] ): """simple docstring""" @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = "toto" A_ : List[str] = HfArgumentParser(_lowerCamelCase ) A_ : Tuple = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : Tuple = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) A_ : List[str] = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) A_ : int = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 42 ) def _a ( self : Dict ): """simple docstring""" A_ : int = HfArgumentParser(_lowerCamelCase ) A_ : List[Any] = argparse.ArgumentParser() expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=_lowerCamelCase ) expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=_lowerCamelCase ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowerCamelCase ) expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : Optional[int] = parser.parse_args([] ) self.assertEqual( _lowerCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , ) A_ : str = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() ) self.assertEqual(_lowerCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) ) def _a ( self : Dict ): """simple docstring""" A_ : Optional[Any] = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=_lowerCamelCase , type=_lowerCamelCase ) expected.add_argument('''--bar''' , default=_lowerCamelCase , type=_lowerCamelCase , help='''help message''' ) expected.add_argument('''--baz''' , default=_lowerCamelCase , type=_lowerCamelCase ) expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=_lowerCamelCase ) expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=_lowerCamelCase ) A_ : Tuple = [OptionalExample] if 
is_python_no_less_than_3_10: dataclass_types.append(_lowerCamelCase ) for dataclass_type in dataclass_types: A_ : int = HfArgumentParser(_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : List[Any] = parser.parse_args([] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , bar=_lowerCamelCase , baz=_lowerCamelCase , ces=[] , des=[] ) ) A_ : Optional[Any] = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() ) self.assertEqual(_lowerCamelCase , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) ) def _a ( self : List[Any] ): """simple docstring""" A_ : List[Any] = HfArgumentParser(_lowerCamelCase ) A_ : Dict = argparse.ArgumentParser() expected.add_argument('''--required_list''' , nargs='''+''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--required_str''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowerCamelCase , ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase ) A_ : List[Any] = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowerCamelCase , ) expected.add_argument('''--opt''' , type=_lowerCamelCase , default=_lowerCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=_lowerCamelCase , help='''help message''' ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ : List[Any] = HfArgumentParser(_lowerCamelCase ) A_ : Union[str, Any] = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } A_ : Optional[int] = parser.parse_dict(_lowerCamelCase )[0] A_ : str = BasicExample(**_lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : List[str] ): """simple docstring""" A_ : Any = HfArgumentParser(_lowerCamelCase ) A_ : List[str] = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, '''extra''': 42, } self.assertRaises(_lowerCamelCase , parser.parse_dict , _lowerCamelCase , allow_extra_keys=_lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase ) A_ : List[str] = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: A_ : Tuple = os.path.join(_lowerCamelCase , '''temp_json''' ) os.mkdir(_lowerCamelCase ) with open(temp_local_path + '''.json''' , '''w+''' ) as f: json.dump(_lowerCamelCase , _lowerCamelCase ) A_ : List[str] = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0] A_ : Optional[Any] = BasicExample(**_lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : int ): """simple docstring""" A_ : int = HfArgumentParser(_lowerCamelCase ) A_ : Tuple = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: A_ : int = 
os.path.join(_lowerCamelCase , '''temp_yaml''' ) os.mkdir(_lowerCamelCase ) with open(temp_local_path + '''.yaml''' , '''w+''' ) as f: yaml.dump(_lowerCamelCase , _lowerCamelCase ) A_ : Optional[Any] = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0] A_ : int = BasicExample(**_lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Dict = HfArgumentParser(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase )
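The tests above all exercise the same core workflow: declare a dataclass, hand it to `HfArgumentParser`, and get typed objects back from CLI-style strings, dicts, JSON, or YAML. A minimal end-to-end sketch with a hypothetical dataclass of my own (field names chosen for illustration):

from dataclasses import dataclass, field
from typing import Optional

from transformers import HfArgumentParser


@dataclass
class RunArguments:
    learning_rate: float = field(default=5e-5, metadata={"help": "peak learning rate"})
    num_epochs: int = 3
    run_name: Optional[str] = None


parser = HfArgumentParser(RunArguments)
(run_args,) = parser.parse_args_into_dataclasses(["--learning_rate", "1e-4", "--num_epochs", "5"])
print(run_args)  # RunArguments(learning_rate=0.0001, num_epochs=5, run_name=None)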
4
from __future__ import annotations


class IIRFilter:
    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
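The class realizes the direct-form I difference equation y[n] = (b0*x[n] + sum_i b_i*x[n-i] - sum_i a_i*y[n-i]) / a0. A short usage sketch with made-up coefficients (a first-order two-tap averager, my own example):

filt = IIRFilter(1)
# y[n] = 0.5 * x[n] + 0.5 * x[n-1]
filt.set_coefficients(a_coeffs=[1.0, 0.0], b_coeffs=[0.5, 0.5])
print([round(filt.process(sample), 3) for sample in [1.0, 1.0, 1.0, 0.0, 0.0]])
# [0.5, 1.0, 1.0, 0.5, 0.0]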
4
1
from __future__ import annotations

import json

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]


def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    instagram_user = InstagramUser("github")
    print(instagram_user)
    print(f"{instagram_user.number_of_posts = }")
    print(f"{instagram_user.number_of_followers = }")
    print(f"{instagram_user.number_of_followings = }")
    print(f"{instagram_user.email = }")
    print(f"{instagram_user.website = }")
    print(f"{instagram_user.profile_picture_url = }")
    print(f"{instagram_user.is_verified = }")
    print(f"{instagram_user.is_private = }")
82
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def _UpperCAmelCase ( snake_case , snake_case ): """simple docstring""" _lowerCAmelCase = XCLIPTextConfig() # derive patch size from model name _lowerCAmelCase = model_name.find("""patch""" ) _lowerCAmelCase = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] ) _lowerCAmelCase = XCLIPVisionConfig(patch_size=snake_case , num_frames=snake_case ) if "large" in model_name: _lowerCAmelCase = 7_68 _lowerCAmelCase = 30_72 _lowerCAmelCase = 12 _lowerCAmelCase = 10_24 _lowerCAmelCase = 40_96 _lowerCAmelCase = 16 _lowerCAmelCase = 24 _lowerCAmelCase = 7_68 _lowerCAmelCase = 30_72 if model_name == "xclip-large-patch14-16-frames": _lowerCAmelCase = 3_36 _lowerCAmelCase = XCLIPConfig.from_text_vision_configs(snake_case , snake_case ) if "large" in model_name: _lowerCAmelCase = 7_68 return config def _UpperCAmelCase ( snake_case ): """simple docstring""" if name == "token_embedding.weight": _lowerCAmelCase = name.replace("""token_embedding.weight""" , """text_model.embeddings.token_embedding.weight""" ) if name == "positional_embedding": _lowerCAmelCase = name.replace("""positional_embedding""" , """text_model.embeddings.position_embedding.weight""" ) if "ln_1" in name: _lowerCAmelCase = name.replace("""ln_1""" , """layer_norm1""" ) if "ln_2" in name: _lowerCAmelCase = name.replace("""ln_2""" , """layer_norm2""" ) if "c_fc" in name: _lowerCAmelCase = name.replace("""c_fc""" , """fc1""" ) if "c_proj" in name: _lowerCAmelCase = name.replace("""c_proj""" , """fc2""" ) if name.startswith("""transformer.resblocks""" ): _lowerCAmelCase = name.replace("""transformer.resblocks""" , """text_model.encoder.layers""" ) if "attn.out_proj" in name and "message" not in name: _lowerCAmelCase = name.replace("""attn.out_proj""" , """self_attn.out_proj""" ) if "ln_final" in name: _lowerCAmelCase = name.replace("""ln_final""" , """text_model.final_layer_norm""" ) # visual encoder if name == "visual.class_embedding": _lowerCAmelCase = name.replace("""visual.class_embedding""" , """vision_model.embeddings.class_embedding""" ) if name == "visual.positional_embedding": _lowerCAmelCase = name.replace("""visual.positional_embedding""" , """vision_model.embeddings.position_embedding.weight""" ) if name.startswith("""visual.transformer.resblocks""" ): _lowerCAmelCase = name.replace("""visual.transformer.resblocks""" , """vision_model.encoder.layers""" ) if "visual.conv1" in name: _lowerCAmelCase = name.replace("""visual.conv1""" , """vision_model.embeddings.patch_embedding""" ) if "visual.ln_pre" in name: _lowerCAmelCase = name.replace("""visual.ln_pre""" , """vision_model.pre_layernorm""" ) if "visual.ln_post" in name: _lowerCAmelCase = name.replace("""visual.ln_post""" , """vision_model.post_layernorm""" ) if "visual.proj" in name: _lowerCAmelCase = name.replace("""visual.proj""" , """visual_projection.weight""" ) if "text_projection" in name: _lowerCAmelCase = name.replace("""text_projection""" , """text_projection.weight""" ) # things on top if "prompts_visual_proj" in name: _lowerCAmelCase = name.replace("""prompts_visual_proj""" , """prompts_visual_projection""" ) if "prompts_visual_ln" in name: _lowerCAmelCase = name.replace("""prompts_visual_ln""" , """prompts_visual_layernorm""" ) # mit if name == "mit.positional_embedding": 
_lowerCAmelCase = name.replace("""positional""" , """position""" ) if name.startswith("""mit.resblocks""" ): _lowerCAmelCase = name.replace("""mit.resblocks""" , """mit.encoder.layers""" ) # prompts generator if name.startswith("""prompts_generator.norm""" ): _lowerCAmelCase = name.replace("""prompts_generator.norm""" , """prompts_generator.layernorm""" ) return name def _UpperCAmelCase ( snake_case , snake_case ): """simple docstring""" for key in orig_state_dict.copy().keys(): _lowerCAmelCase = orig_state_dict.pop(snake_case ) if "attn.in_proj" in key: _lowerCAmelCase = key.split(""".""" ) if key.startswith("""visual""" ): _lowerCAmelCase = key_split[3] _lowerCAmelCase = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: _lowerCAmelCase = val[ :dim, : ] _lowerCAmelCase = val[ dim : dim * 2, : ] _lowerCAmelCase = val[ -dim:, : ] else: _lowerCAmelCase = val[ :dim ] _lowerCAmelCase = val[ dim : dim * 2 ] _lowerCAmelCase = val[ -dim: ] else: if "weight" in key: _lowerCAmelCase = val[ :dim, : ] _lowerCAmelCase = val[ dim : dim * 2, : ] _lowerCAmelCase = val[ -dim:, : ] else: _lowerCAmelCase = val[:dim] _lowerCAmelCase = val[ dim : dim * 2 ] _lowerCAmelCase = val[-dim:] elif key.startswith("""mit""" ): _lowerCAmelCase = key_split[2] _lowerCAmelCase = config.vision_config.mit_hidden_size if "weight" in key: _lowerCAmelCase = val[:dim, :] _lowerCAmelCase = val[dim : dim * 2, :] _lowerCAmelCase = val[-dim:, :] else: _lowerCAmelCase = val[:dim] _lowerCAmelCase = val[dim : dim * 2] _lowerCAmelCase = val[-dim:] else: _lowerCAmelCase = key_split[2] _lowerCAmelCase = config.text_config.hidden_size if "weight" in key: _lowerCAmelCase = val[:dim, :] _lowerCAmelCase = val[ dim : dim * 2, : ] _lowerCAmelCase = val[-dim:, :] else: _lowerCAmelCase = val[:dim] _lowerCAmelCase = val[ dim : dim * 2 ] _lowerCAmelCase = val[-dim:] else: _lowerCAmelCase = rename_key(snake_case ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: _lowerCAmelCase = val.T _lowerCAmelCase = val return orig_state_dict def _UpperCAmelCase ( snake_case ): """simple docstring""" if num_frames == 8: _lowerCAmelCase = """eating_spaghetti_8_frames.npy""" elif num_frames == 16: _lowerCAmelCase = """eating_spaghetti.npy""" elif num_frames == 32: _lowerCAmelCase = """eating_spaghetti_32_frames.npy""" _lowerCAmelCase = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , filename=snake_case , repo_type="""dataset""" , ) _lowerCAmelCase = np.load(snake_case ) return list(snake_case ) def _UpperCAmelCase ( snake_case , snake_case=None , snake_case=False ): """simple docstring""" _lowerCAmelCase = { # fully supervised kinetics-400 checkpoints """xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""", """xclip-base-patch32-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth""" ), """xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""", """xclip-base-patch16-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth""" ), """xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb""", """xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f""", 
# fully supervised kinetics-600 checkpoints """xclip-base-patch16-kinetics-600""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth""" ), """xclip-base-patch16-kinetics-600-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth""" ), """xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be""", # few shot """xclip-base-patch16-hmdb-2-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth""" ), """xclip-base-patch16-hmdb-4-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth""" ), """xclip-base-patch16-hmdb-8-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth""" ), """xclip-base-patch16-hmdb-16-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth""" ), """xclip-base-patch16-ucf-2-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth""" ), """xclip-base-patch16-ucf-4-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth""" ), """xclip-base-patch16-ucf-8-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth""" ), """xclip-base-patch16-ucf-16-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth""" ), # zero shot """xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""", } _lowerCAmelCase = model_to_url[model_name] _lowerCAmelCase = 8 if "16-frames" in model_name: _lowerCAmelCase = 16 elif "shot" in model_name: _lowerCAmelCase = 32 _lowerCAmelCase = get_xclip_config(snake_case , snake_case ) _lowerCAmelCase = XCLIPModel(snake_case ) model.eval() if "drive" in checkpoint_url: _lowerCAmelCase = """pytorch_model.bin""" gdown.cached_download(snake_case , snake_case , quiet=snake_case ) _lowerCAmelCase = torch.load(snake_case , map_location="""cpu""" )["""model"""] else: _lowerCAmelCase = torch.hub.load_state_dict_from_url(snake_case )["""model"""] _lowerCAmelCase = convert_state_dict(snake_case , snake_case ) _lowerCAmelCase = XCLIPModel(snake_case ) _lowerCAmelCase , _lowerCAmelCase = model.load_state_dict(snake_case , strict=snake_case ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() _lowerCAmelCase = 3_36 if model_name == """xclip-large-patch14-16-frames""" else 2_24 _lowerCAmelCase = VideoMAEImageProcessor(size=snake_case ) _lowerCAmelCase = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" ) _lowerCAmelCase = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" ) _lowerCAmelCase = XCLIPProcessor(image_processor=snake_case , tokenizer=snake_case ) _lowerCAmelCase = prepare_video(snake_case ) _lowerCAmelCase = processor( text=["""playing sports""", """eating spaghetti""", """go shopping"""] , videos=snake_case , return_tensors="""pt""" , padding=snake_case ) print("""Shape of pixel values:""" , inputs.pixel_values.shape ) with torch.no_grad(): _lowerCAmelCase = model(**snake_case ) # Verify outputs _lowerCAmelCase = outputs.logits_per_video _lowerCAmelCase = logits_per_video.softmax(dim=1 ) print("""Probs:""" , snake_case ) # kinetics-400 if model_name == "xclip-base-patch32": _lowerCAmelCase = 
torch.tensor([[0.0_019, 0.9_951, 0.0_030]] ) elif model_name == "xclip-base-patch32-16-frames": _lowerCAmelCase = torch.tensor([[7.09_99E-04, 9.98_83E-01, 4.55_80E-04]] ) elif model_name == "xclip-base-patch16": _lowerCAmelCase = torch.tensor([[0.0_083, 0.9_681, 0.0_236]] ) elif model_name == "xclip-base-patch16-16-frames": _lowerCAmelCase = torch.tensor([[7.69_37E-04, 9.97_28E-01, 1.94_73E-03]] ) elif model_name == "xclip-large-patch14": _lowerCAmelCase = torch.tensor([[0.0_062, 0.9_864, 0.0_075]] ) elif model_name == "xclip-large-patch14-16-frames": _lowerCAmelCase = torch.tensor([[3.38_77E-04, 9.99_37E-01, 2.88_88E-04]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": _lowerCAmelCase = torch.tensor([[0.0_555, 0.8_914, 0.0_531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": _lowerCAmelCase = torch.tensor([[3.85_54E-04, 9.99_29E-01, 3.27_54E-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": _lowerCAmelCase = torch.tensor([[0.0_036, 0.9_920, 0.0_045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": _lowerCAmelCase = torch.tensor([[7.18_90E-06, 9.99_94E-01, 5.65_59E-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": _lowerCAmelCase = torch.tensor([[1.03_20E-05, 9.99_93E-01, 6.24_35E-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": _lowerCAmelCase = torch.tensor([[4.13_77E-06, 9.99_90E-01, 9.83_86E-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": _lowerCAmelCase = torch.tensor([[4.13_47E-05, 9.99_62E-01, 3.34_11E-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": _lowerCAmelCase = torch.tensor([[8.58_57E-05, 9.99_28E-01, 6.32_91E-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": _lowerCAmelCase = torch.tensor([[8.58_57E-05, 9.99_28E-01, 6.32_91E-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": _lowerCAmelCase = torch.tensor([[0.0_027, 0.9_904, 0.0_070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": _lowerCAmelCase = torch.tensor([[9.82_19E-04, 9.95_93E-01, 3.08_63E-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": _lowerCAmelCase = torch.tensor([[3.50_82E-04, 9.97_85E-01, 1.79_66E-03]] ) else: raise ValueError(F'Model name {model_name} not supported' ) assert torch.allclose(snake_case , snake_case , atol=1E-3 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(F'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(snake_case ) if push_to_hub: print("""Pushing model, processor and slow tokenizer files to the hub...""" ) model.push_to_hub(snake_case , organization="""nielsr""" ) processor.push_to_hub(snake_case , organization="""nielsr""" ) slow_tokenizer.push_to_hub(snake_case , organization="""nielsr""" ) if __name__ == "__main__": A__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""xclip-base-patch32""", type=str, help="""Name of the model.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) A__ = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
82
1
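The converted checkpoints are published on the Hub, so the conversion script above can be sanity-checked end to end through the public transformers API. A sketch using dummy frames (assumes network access; microsoft/xclip-base-patch32 is one of the released checkpoints):

import numpy as np
import torch
from transformers import XCLIPModel, XCLIPProcessor

processor = XCLIPProcessor.from_pretrained("microsoft/xclip-base-patch32")
model = XCLIPModel.from_pretrained("microsoft/xclip-base-patch32")

# 8 random uint8 frames stand in for a real video clip.
video = list((np.random.rand(8, 224, 224, 3) * 255).astype(np.uint8))
inputs = processor(
    text=["playing sports", "eating spaghetti", "go shopping"],
    videos=video,
    return_tensors="pt",
    padding=True,
)
with torch.no_grad():
    probs = model(**inputs).logits_per_video.softmax(dim=1)
print(probs)  # one probability per candidate caption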
import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class A ( unittest.TestCase ): @property def _A (self ): torch.manual_seed(0 ) __lowercase= UNetaDModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , ) return model def _A (self ): __lowercase= self.dummy_uncond_unet __lowercase= KarrasVeScheduler() __lowercase= KarrasVePipeline(unet=lowerCAmelCase , scheduler=lowerCAmelCase ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) __lowercase= torch.manual_seed(0 ) __lowercase= pipe(num_inference_steps=2 , generator=lowerCAmelCase , output_type='numpy' ).images __lowercase= torch.manual_seed(0 ) __lowercase= pipe(num_inference_steps=2 , generator=lowerCAmelCase , output_type='numpy' , return_dict=lowerCAmelCase )[0] __lowercase= image[0, -3:, -3:, -1] __lowercase= image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) __lowercase= np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class A ( unittest.TestCase ): def _A (self ): __lowercase= 'google/ncsnpp-celebahq-256' __lowercase= UNetaDModel.from_pretrained(lowerCAmelCase ) __lowercase= KarrasVeScheduler() __lowercase= KarrasVePipeline(unet=lowerCAmelCase , scheduler=lowerCAmelCase ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) __lowercase= torch.manual_seed(0 ) __lowercase= pipe(num_inference_steps=2_0 , generator=lowerCAmelCase , output_type='numpy' ).images __lowercase= image[0, -3:, -3:, -1] assert image.shape == (1, 2_5_6, 2_5_6, 3) __lowercase= np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
304
import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device lowerCAmelCase = False class A ( unittest.TestCase ): pass @nightly @require_torch_gpu class A ( unittest.TestCase ): def _A (self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _A (self ): __lowercase= VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) __lowercase= load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) __lowercase= torch.manual_seed(0 ) __lowercase= pipe.dual_guided( prompt='first prompt' , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowerCAmelCase ) __lowercase= VersatileDiffusionPipeline.from_pretrained(lowerCAmelCase , torch_dtype=torch.floataa ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) __lowercase= generator.manual_seed(0 ) __lowercase= pipe.dual_guided( prompt='first prompt' , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def _A (self ): __lowercase= VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) __lowercase= 'cyberpunk 2077' __lowercase= load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) __lowercase= torch.manual_seed(0 ) __lowercase= pipe.dual_guided( prompt=lowerCAmelCase , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' , ).images __lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase= np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 __lowercase= 'A painting of a squirrel eating a burger ' __lowercase= torch.manual_seed(0 ) __lowercase= pipe.text_to_image( prompt=lowerCAmelCase , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' ).images __lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase= np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 __lowercase= pipe.image_variation(lowerCAmelCase , generator=lowerCAmelCase , output_type='numpy' ).images __lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase= np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
304
1
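For reference, the slow test above corresponds to roughly this standalone usage — a sketch assuming an older diffusers release that still ships KarrasVePipeline (it has since been deprecated):

import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
pipe.to("cuda")  # CPU also works, just slowly

generator = torch.manual_seed(0)
image = pipe(num_inference_steps=20, generator=generator).images[0]
image.save("karras_ve_sample.png")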
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D: ").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
120
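A non-interactive round trip through the keyword cipher above (uses the cleaned-up function names from that row; the exact ciphertext depends on the keyword, so only the inversion is asserted):

cipher_map = create_cipher_map("marvin")
secret = encipher("Hello world", cipher_map)
print(secret)
assert decipher(secret, cipher_map) == "HELLO WORLD"  # encipher upper-cases input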
"""simple docstring""" import numpy as np import torch import tqdm from ...models.unet_ad import UNetaDModel from ...pipelines import DiffusionPipeline from ...utils import randn_tensor from ...utils.dummy_pt_objects import DDPMScheduler class _UpperCAmelCase( lowerCamelCase ): def __init__( self , __a , __a , __a , __a , ) -> Optional[int]: '''simple docstring''' super().__init__() _UpperCamelCase = value_function _UpperCamelCase = unet _UpperCamelCase = scheduler _UpperCamelCase = env _UpperCamelCase = env.get_dataset() _UpperCamelCase = {} for key in self.data.keys(): try: _UpperCamelCase = self.data[key].mean() except: # noqa: E722 pass _UpperCamelCase = {} for key in self.data.keys(): try: _UpperCamelCase = self.data[key].std() except: # noqa: E722 pass _UpperCamelCase = env.observation_space.shape[0] _UpperCamelCase = env.action_space.shape[0] def UpperCAmelCase ( self , __a , __a) -> int: '''simple docstring''' return (x_in - self.means[key]) / self.stds[key] def UpperCAmelCase ( self , __a , __a) -> List[str]: '''simple docstring''' return x_in * self.stds[key] + self.means[key] def UpperCAmelCase ( self , __a) -> Union[str, Any]: '''simple docstring''' if type(__a) is dict: return {k: self.to_torch(__a) for k, v in x_in.items()} elif torch.is_tensor(__a): return x_in.to(self.unet.device) return torch.tensor(__a , device=self.unet.device) def UpperCAmelCase ( self , __a , __a , __a) -> str: '''simple docstring''' for key, val in cond.items(): _UpperCamelCase = val.clone() return x_in def UpperCAmelCase ( self , __a , __a , __a , __a) -> int: '''simple docstring''' _UpperCamelCase = x.shape[0] _UpperCamelCase = None for i in tqdm.tqdm(self.scheduler.timesteps): # create batch of timesteps to pass into model _UpperCamelCase = torch.full((batch_size,) , __a , device=self.unet.device , dtype=torch.long) for _ in range(__a): with torch.enable_grad(): x.requires_grad_() # permute to match dimension for pre-trained models _UpperCamelCase = self.value_function(x.permute(0 , 2 , 1) , __a).sample _UpperCamelCase = torch.autograd.grad([y.sum()] , [x])[0] _UpperCamelCase = self.scheduler._get_variance(__a) _UpperCamelCase = torch.exp(0.5 * posterior_variance) _UpperCamelCase = model_std * grad _UpperCamelCase = 0 _UpperCamelCase = x.detach() _UpperCamelCase = x + scale * grad _UpperCamelCase = self.reset_xa(__a , __a , self.action_dim) _UpperCamelCase = self.unet(x.permute(0 , 2 , 1) , __a).sample.permute(0 , 2 , 1) # TODO: verify deprecation of this kwarg _UpperCamelCase = self.scheduler.step(__a , __a , __a , predict_epsilon=__a)['''prev_sample'''] # apply conditions to the trajectory (set the initial state) _UpperCamelCase = self.reset_xa(__a , __a , self.action_dim) _UpperCamelCase = self.to_torch(__a) return x, y def __call__( self , __a , __a=64 , __a=32 , __a=2 , __a=0.1) -> Optional[Any]: '''simple docstring''' # normalize the observations and create batch dimension _UpperCamelCase = self.normalize(__a , '''observations''') _UpperCamelCase = obs[None].repeat(__a , axis=0) _UpperCamelCase = {0: self.to_torch(__a)} _UpperCamelCase = (batch_size, planning_horizon, self.state_dim + self.action_dim) # generate initial noise and apply our conditions (to make the trajectories start at current state) _UpperCamelCase = randn_tensor(__a , device=self.unet.device) _UpperCamelCase = self.reset_xa(__a , __a , self.action_dim) _UpperCamelCase = self.to_torch(__a) # run the diffusion process _UpperCamelCase , _UpperCamelCase = self.run_diffusion(__a , __a , __a , __a) # sort output trajectories by 
value _UpperCamelCase = y.argsort(0 , descending=__a).squeeze() _UpperCamelCase = x[sorted_idx] _UpperCamelCase = sorted_values[:, :, : self.action_dim] _UpperCamelCase = actions.detach().cpu().numpy() _UpperCamelCase = self.de_normalize(__a , key='''actions''') # select the action with the highest value if y is not None: _UpperCamelCase = 0 else: # if we didn't run value guiding, select a random action _UpperCamelCase = np.random.randint(0 , __a) _UpperCamelCase = denorm_actions[selected_index, 0] return denorm_actions
194
0
import unittest from transformers import CamembertTokenizer, CamembertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import is_torch_available from ...test_tokenization_common import TokenizerTesterMixin _snake_case : str = get_tests_dir("fixtures/test_sentencepiece.model") _snake_case : Optional[Any] = get_tests_dir("fixtures/test_sentencepiece_bpe.model") _snake_case : str = """pt""" if is_torch_available() else """tf""" @require_sentencepiece @require_tokenizers class a (_lowerCAmelCase , unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Dict = CamembertTokenizer __UpperCAmelCase : Optional[int] = CamembertTokenizerFast __UpperCAmelCase : str = True __UpperCAmelCase : Dict = True def __snake_case ( self : Tuple ) -> List[Any]: super().setUp() # We have a SentencePiece fixture for testing __snake_case : Dict = CamembertTokenizer(lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def __snake_case ( self : Union[str, Any] ) -> Optional[Any]: __snake_case : List[str] = "<pad>" __snake_case : Any = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase ) , lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase ) , lowerCamelCase ) def __snake_case ( self : Dict ) -> Tuple: __snake_case : Any = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>NOTUSED" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "<mask>" ) self.assertEqual(len(lowerCamelCase ) , 1004 ) def __snake_case ( self : Dict ) -> Optional[Any]: self.assertEqual(self.get_tokenizer().vocab_size , 1005 ) def __snake_case ( self : Dict ) -> Optional[int]: __snake_case : int = CamembertTokenizer(lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) __snake_case : Any = CamembertTokenizerFast.from_pretrained(self.tmpdirname ) __snake_case : Union[str, Any] = "I was born in 92000, and this is falsé." __snake_case : Optional[Any] = tokenizer.encode(lowerCamelCase ) __snake_case : Optional[Any] = rust_tokenizer.encode(lowerCamelCase ) self.assertListEqual(lowerCamelCase , lowerCamelCase ) __snake_case : int = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) __snake_case : Optional[int] = rust_tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) self.assertListEqual(lowerCamelCase , lowerCamelCase ) # <unk> tokens are not the same for `rust` than for `slow`. # Because spm gives back raw token instead of `unk` in EncodeAsPieces # tokens = tokenizer.tokenize(sequence) __snake_case : Optional[Any] = tokenizer.convert_ids_to_tokens(lowerCamelCase ) __snake_case : Optional[Any] = rust_tokenizer.tokenize(lowerCamelCase ) self.assertListEqual(lowerCamelCase , lowerCamelCase ) def __snake_case ( self : List[str] ) -> str: if not self.test_rust_tokenizer: return __snake_case : Optional[int] = self.get_tokenizer() __snake_case : Any = self.get_rust_tokenizer() __snake_case : Tuple = "I was born in 92000, and this is falsé." 
__snake_case : Tuple = tokenizer.tokenize(lowerCamelCase ) __snake_case : Optional[Any] = rust_tokenizer.tokenize(lowerCamelCase ) self.assertListEqual(lowerCamelCase , lowerCamelCase ) __snake_case : Optional[Any] = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) __snake_case : List[Any] = rust_tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) self.assertListEqual(lowerCamelCase , lowerCamelCase ) __snake_case : List[Any] = self.get_rust_tokenizer() __snake_case : Any = tokenizer.encode(lowerCamelCase ) __snake_case : Union[str, Any] = rust_tokenizer.encode(lowerCamelCase ) self.assertListEqual(lowerCamelCase , lowerCamelCase ) @slow def __snake_case ( self : Union[str, Any] ) -> List[Any]: __snake_case : Any = {"input_ids": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # camembert is a french model. So we also use french texts. __snake_case : Tuple = [ "Le transformeur est un modèle d\'apprentissage profond introduit en 2017, " "utilisé principalement dans le domaine du traitement automatique des langues (TAL).", "À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus " "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches " "telles que la traduction et la synthèse de texte.", ] self.tokenizer_integration_test_util( expected_encoding=lowerCamelCase , model_name="camembert-base" , revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf" , sequences=lowerCamelCase , )
353
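Outside the test harness, the tokenizer under test is exercised like any other pretrained tokenizer — a minimal sketch (requires sentencepiece and downloads the model on first use):

from transformers import CamembertTokenizer

tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
ids = tokenizer.encode("J'aime le camembert !")
print(tokenizer.convert_ids_to_tokens(ids))
print(tokenizer.decode(ids, skip_special_tokens=True))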
from ..utils import DummyObject, requires_backends


# NOTE: the original class names are not recoverable from this dump; the names
# below are placeholders for two dummy objects gated on the "speech" backend.
class DummySpeechObjectA(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class DummySpeechObjectB(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
134
0
'''simple docstring''' class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : Any = None A : Optional[Any] = None A : Tuple = graph self._normalize_graph(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Dict = len(SCREAMING_SNAKE_CASE ) A : Optional[Any] = None def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" if sources is int: A : Dict = [sources] if sinks is int: A : str = [sinks] if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0: return A : Optional[int] = sources[0] A : Union[str, Any] = sinks[0] # make fake vertex if there are more # than one source or sink if len(SCREAMING_SNAKE_CASE ) > 1 or len(SCREAMING_SNAKE_CASE ) > 1: A : Optional[int] = 0 for i in sources: max_input_flow += sum(self.graph[i] ) A : Dict = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: A : Dict = max_input_flow A : Tuple = 0 A : Tuple = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: A : Optional[Any] = max_input_flow A : Optional[Any] = size - 1 def __lowerCAmelCase ( self ) -> Any: """simple docstring""" if self.maximum_flow_algorithm is None: raise Exception('''You need to set maximum flow algorithm before.''' ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : List[Any] = algorithm(self ) class A : def __init__( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : Union[str, Any] = flow_network A : Optional[Any] = flow_network.verticesCount A : Tuple = flow_network.sourceIndex A : Dict = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that A : str = flow_network.graph A : Optional[Any] = False def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" if not self.executed: self._algorithm() A : Optional[int] = True def __lowerCAmelCase ( self ) -> Any: """simple docstring""" pass class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" super().__init__(SCREAMING_SNAKE_CASE ) # use this to save your result A : List[str] = -1 def __lowerCAmelCase ( self ) -> str: """simple docstring""" if not self.executed: raise Exception('''You should execute algorithm before using its result!''' ) return self.maximum_flow class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" super().__init__(SCREAMING_SNAKE_CASE ) A : Optional[Any] = [[0] * self.verticies_count for i in range(self.verticies_count )] A : Union[str, Any] = [0] * self.verticies_count A : List[Any] = [0] * self.verticies_count def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Tuple = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule A : Optional[Any] = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list 
A : Union[str, Any] = 0 while i < len(SCREAMING_SNAKE_CASE ): A : str = vertices_list[i] A : List[str] = self.heights[vertex_index] self.process_vertex(SCREAMING_SNAKE_CASE ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE ) ) A : int = 0 else: i += 1 A : Optional[Any] = sum(self.preflow[self.source_index] ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.relabel(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : Dict = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : Dict = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): A : Dict = self.heights[to_index] if min_height is not None: A : Dict = min_height + 1 if __name__ == "__main__": lowercase : Optional[int] = [0] lowercase : List[Any] = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] lowercase : int = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network lowercase : List[str] = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate lowercase : List[str] = flow_network.find_maximum_flow() print(f'''maximum flow is {maximum_flow}''')
3
from typing import Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Assume the input is already a (list of) {"image": ..., "question": ...} dict(s).
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
3
1
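End to end, the pipeline class above is normally reached through the pipeline() factory. A sketch using the default VQA checkpoint (the image URL is just an example; any local path or PIL image works):

from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
preds = vqa(
    image="https://huggingface.co/datasets/Narsil/image_dummy/raw/main/lena.png",
    question="What is she wearing?",
    top_k=2,
)
print(preds)  # list of {"score": float, "answer": str}, as built in postprocess()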
"""simple docstring""" def __A ( a_ :int) -> bool: if num < 0: return False __a : int = num __a : int = 0 while num > 0: __a : Optional[int] = rev_num * 10 + (num % 10) num //= 10 return num_copy == rev_num if __name__ == "__main__": import doctest doctest.testmod()
188
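Worked examples for the palindrome check above (negative numbers are rejected by the early return):

for n in (121, 0, 10, -121, 123454321):
    print(n, is_palindrome_number(n))
# 121 True, 0 True, 10 False, -121 False, 123454321 True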
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging A = logging.get_logger(__name__) A = {'''vocab_file''': '''spiece.model'''} A = { '''vocab_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''', } } A = { '''albert-base-v1''': 512, '''albert-large-v1''': 512, '''albert-xlarge-v1''': 512, '''albert-xxlarge-v1''': 512, '''albert-base-v2''': 512, '''albert-large-v2''': 512, '''albert-xlarge-v2''': 512, '''albert-xxlarge-v2''': 512, } A = '''▁''' class __lowercase ( _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = VOCAB_FILES_NAMES __lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , _UpperCAmelCase , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase="[CLS]" , _UpperCAmelCase="[SEP]" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="[SEP]" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="[CLS]" , _UpperCAmelCase="[MASK]" , _UpperCAmelCase = None , **_UpperCAmelCase , ): # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
__a : int = ( AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase , normalized=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token ) __a : Any = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_UpperCAmelCase , remove_space=_UpperCAmelCase , keep_accents=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , ) __a : Tuple = do_lower_case __a : Optional[Any] = remove_space __a : Optional[Any] = keep_accents __a : Union[str, Any] = vocab_file __a : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_UpperCAmelCase ) @property def _lowerCamelCase ( self ): return len(self.sp_model ) def _lowerCamelCase ( self ): __a : Any = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): __a : str = self.__dict__.copy() __a : Tuple = None return state def __setstate__( self , _UpperCAmelCase ): __a : Any = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __a : Optional[Any] = {} __a : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _lowerCamelCase ( self , _UpperCAmelCase ): if self.remove_space: __a : Any = ''' '''.join(inputs.strip().split() ) else: __a : Tuple = inputs __a : Union[str, Any] = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' ) if not self.keep_accents: __a : List[str] = unicodedata.normalize('''NFKD''' , _UpperCAmelCase ) __a : Optional[int] = ''''''.join([c for c in outputs if not unicodedata.combining(_UpperCAmelCase )] ) if self.do_lower_case: __a : Optional[Any] = outputs.lower() return outputs def _lowerCamelCase ( self , _UpperCAmelCase ): __a : int = self.preprocess_text(_UpperCAmelCase ) __a : Tuple = self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase ) __a : int = [] for piece in pieces: if len(_UpperCAmelCase ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit(): __a : List[str] = self.sp_model.EncodeAsPieces(piece[:-1].replace(_UpperCAmelCase , '''''' ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: __a : Tuple = cur_pieces[1:] else: __a : Optional[Any] = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(_UpperCAmelCase ) else: new_pieces.append(_UpperCAmelCase ) return new_pieces def _lowerCamelCase ( self , _UpperCAmelCase ): return self.sp_model.PieceToId(_UpperCAmelCase ) def _lowerCamelCase ( self , _UpperCAmelCase ): return self.sp_model.IdToPiece(_UpperCAmelCase ) def _lowerCamelCase ( self , _UpperCAmelCase ): __a : List[str] = [] __a : str = '''''' __a : Any = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_UpperCAmelCase ) + token __a : Tuple = True __a : Tuple = [] else: current_sub_tokens.append(_UpperCAmelCase ) __a : Optional[int] = False out_string += self.sp_model.decode(_UpperCAmelCase ) return out_string.strip() def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ): __a : int = [self.sep_token_id] __a : Union[str, Any] = [self.cls_token_id] 
if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase ) if token_ids_a is not None: return [1] + ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) + [1] return [1] + ([0] * len(_UpperCAmelCase )) + [1] def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ): __a : Union[str, Any] = [self.sep_token_id] __a : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ): if not os.path.isdir(_UpperCAmelCase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __a : List[str] = os.path.join( _UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCAmelCase , '''wb''' ) as fi: __a : Any = self.sp_model.serialized_model_proto() fi.write(_UpperCAmelCase ) return (out_vocab_file,)
188
1
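The tokenizer above backs the released ALBERT checkpoints, so its sentence-pair handling can be checked directly — a sketch (requires sentencepiece and a model download):

from transformers import AlbertTokenizer

tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
enc = tokenizer("Sentence A", "Sentence B")
print(enc["input_ids"])       # [CLS] A [SEP] B [SEP], as built by the special-tokens helpers
print(enc["token_type_ids"])  # 0s for the first segment, 1s for the second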
"""simple docstring""" import inspect from typing import Callable, List, Optional, Union import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, WhisperForConditionalGeneration, WhisperProcessor, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.utils import logging A: str = logging.get_logger(__name__) # pylint: disable=invalid-name class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> int: '''simple docstring''' super().__init__() if safety_checker is None: logger.warning( F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" """ that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered""" """ results in services or applications open to the public. Both the diffusers team and Hugging Face""" """ strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling""" """ it only for use-cases that involve analyzing network behavior or auditing its results. For more""" """ information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" ) self.register_modules( speech_model=_SCREAMING_SNAKE_CASE , speech_processor=_SCREAMING_SNAKE_CASE , vae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE = "auto" ) -> Optional[Any]: '''simple docstring''' if slice_size == "auto": UpperCAmelCase : Optional[int] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(_SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: '''simple docstring''' self.enable_attention_slicing(_SCREAMING_SNAKE_CASE ) @torch.no_grad() def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=16000 , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 50 , _SCREAMING_SNAKE_CASE = 7.5 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , **_SCREAMING_SNAKE_CASE , ) -> Any: '''simple docstring''' UpperCAmelCase : Any = self.speech_processor.feature_extractor( _SCREAMING_SNAKE_CASE , return_tensors="""pt""" , sampling_rate=_SCREAMING_SNAKE_CASE ).input_features.to(self.device ) UpperCAmelCase : Union[str, Any] = self.speech_model.generate(_SCREAMING_SNAKE_CASE , max_length=480000 ) UpperCAmelCase : Union[str, Any] = self.speech_processor.tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , normalize=_SCREAMING_SNAKE_CASE )[ 0 ] if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): UpperCAmelCase : Tuple = 1 elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): UpperCAmelCase : 
            UpperCAmelCase : int = len(_SCREAMING_SNAKE_CASE )
        else:
            raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(_SCREAMING_SNAKE_CASE )}" )

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." )

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or callback_steps <= 0)
        ):
            raise ValueError(
                F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                F" {type(_SCREAMING_SNAKE_CASE )}." )

        # get prompt text embeddings
        UpperCAmelCase : Union[str, Any] = self.tokenizer(
            _SCREAMING_SNAKE_CASE , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
        UpperCAmelCase : Optional[Any] = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            UpperCAmelCase : Optional[Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                """The following part of your input was truncated because CLIP can only handle sequences up to"""
                F" {self.tokenizer.model_max_length} tokens: {removed_text}" )
            UpperCAmelCase : Optional[int] = text_input_ids[:, : self.tokenizer.model_max_length]
        UpperCAmelCase : List[Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = text_embeddings.shape
        UpperCAmelCase : Optional[Any] = text_embeddings.repeat(1 , _SCREAMING_SNAKE_CASE , 1 )
        UpperCAmelCase : Optional[Any] = text_embeddings.view(bs_embed * num_images_per_prompt , _SCREAMING_SNAKE_CASE , -1 )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        UpperCAmelCase : List[Any] = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            UpperCAmelCase : List[str]
            if negative_prompt is None:
                UpperCAmelCase : str = [""""""] * batch_size
            elif type(_SCREAMING_SNAKE_CASE ) is not type(_SCREAMING_SNAKE_CASE ):
                raise TypeError(
                    F"`negative_prompt` should be the same type to `prompt`, but got {type(_SCREAMING_SNAKE_CASE )} !="
                    F" {type(_SCREAMING_SNAKE_CASE )}." )
            elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
                UpperCAmelCase : Union[str, Any] = [negative_prompt]
            elif batch_size != len(_SCREAMING_SNAKE_CASE ):
                raise ValueError(
                    F"`negative_prompt`: {negative_prompt} has batch size {len(_SCREAMING_SNAKE_CASE )}, but `prompt`:"
                    F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    """ the batch size of `prompt`.""" )
            else:
                UpperCAmelCase : Any = negative_prompt

            UpperCAmelCase : Dict = text_input_ids.shape[-1]
            UpperCAmelCase : str = self.tokenizer(
                _SCREAMING_SNAKE_CASE , padding="""max_length""" , max_length=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" , )
            UpperCAmelCase : Optional[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            UpperCAmelCase : int = uncond_embeddings.shape[1]
            UpperCAmelCase : Dict = uncond_embeddings.repeat(1 , _SCREAMING_SNAKE_CASE , 1 )
            UpperCAmelCase : int = uncond_embeddings.view(batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE , -1 )

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            UpperCAmelCase : Optional[Any] = torch.cat([uncond_embeddings, text_embeddings] )

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        UpperCAmelCase : str = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        UpperCAmelCase : Tuple = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                UpperCAmelCase : List[str] = torch.randn(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device="""cpu""" , dtype=_SCREAMING_SNAKE_CASE ).to(
                    self.device )
            else:
                UpperCAmelCase : Optional[int] = torch.randn(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device=self.device , dtype=_SCREAMING_SNAKE_CASE )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
            UpperCAmelCase : List[str] = latents.to(self.device )

        # set timesteps
        self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        UpperCAmelCase : Union[str, Any] = self.scheduler.timesteps.to(self.device )

        # scale the initial noise by the standard deviation required by the scheduler
        UpperCAmelCase : List[Any] = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        UpperCAmelCase : Optional[Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        UpperCAmelCase : Optional[int] = {}
        if accepts_eta:
            UpperCAmelCase : Dict = eta

        for i, t in enumerate(self.progress_bar(_SCREAMING_SNAKE_CASE ) ):
            # expand the latents if we are doing classifier free guidance
            UpperCAmelCase : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            UpperCAmelCase : Dict = self.scheduler.scale_model_input(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

            # predict the noise residual
            UpperCAmelCase : Tuple = self.unet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE ).sample

            # perform guidance
            if do_classifier_free_guidance:
                UpperCAmelCase , UpperCAmelCase : Optional[int] = noise_pred.chunk(2 )
                UpperCAmelCase : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            UpperCAmelCase : Tuple = self.scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

        UpperCAmelCase : Optional[int] = 1 / 0.1_8215 * latents
        UpperCAmelCase : Optional[Any] = self.vae.decode(_SCREAMING_SNAKE_CASE ).sample
        UpperCAmelCase : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        UpperCAmelCase : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()

        if output_type == "pil":
            UpperCAmelCase : List[Any] = self.numpy_to_pil(_SCREAMING_SNAKE_CASE )

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=_SCREAMING_SNAKE_CASE , nsfw_content_detected=_SCREAMING_SNAKE_CASE )
109
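For reference, a minimal sketch of the classifier-free guidance step performed inside the denoising loop above; the tensor shapes and guidance scale here are illustrative placeholders, not values taken from the pipeline.

import torch

guidance_scale = 7.5  # `w` in Imagen eq. (2); 1.0 corresponds to no guidance
noise_pred_uncond = torch.zeros(1, 4, 64, 64)  # placeholder unconditional prediction
noise_pred_text = torch.ones(1, 4, 64, 64)     # placeholder text-conditioned prediction
# same combination as in the loop above: move the prediction away from the
# unconditional direction, scaled by the guidance weight
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)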
"""simple docstring""" import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline _a : int= datasets.utils.logging.get_logger(__name__) @dataclass class UpperCamelCase ( datasets.BuilderConfig ): UpperCAmelCase : Optional[datasets.Features] = None UpperCAmelCase : str = "utf-8" UpperCAmelCase : Optional[str] = None UpperCAmelCase : Optional[str] = None UpperCAmelCase : bool = True # deprecated UpperCAmelCase : Optional[int] = None # deprecated UpperCAmelCase : int = 10 << 20 # 10MB UpperCAmelCase : Optional[bool] = None class UpperCamelCase ( datasets.ArrowBasedBuilder ): UpperCAmelCase : int = JsonConfig def _lowercase (self : int) -> List[str]: if self.config.block_size is not None: logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead') __snake_case : Any = self.config.block_size if self.config.use_threads is not True: logger.warning( 'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.') if self.config.newlines_in_values is not None: raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported') return datasets.DatasetInfo(features=self.config.features) def _lowercase (self : Dict , _A : Any) -> Optional[Any]: if not self.config.data_files: raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") __snake_case : Dict = dl_manager.download_and_extract(self.config.data_files) if isinstance(_A , (str, list, tuple)): __snake_case : str = data_files if isinstance(_A , _A): __snake_case : int = [files] __snake_case : Tuple = [dl_manager.iter_files(_A) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files})] __snake_case : str = [] for split_name, files in data_files.items(): if isinstance(_A , _A): __snake_case : Optional[int] = [files] __snake_case : int = [dl_manager.iter_files(_A) for file in files] splits.append(datasets.SplitGenerator(name=_A , gen_kwargs={'files': files})) return splits def _lowercase (self : Optional[Any] , _A : pa.Table) -> pa.Table: if self.config.features is not None: # adding missing columns for column_name in set(self.config.features) - set(pa_table.column_names): __snake_case : List[Any] = self.config.features.arrow_schema.field(_A).type __snake_case : Any = pa_table.append_column(_A , pa.array([None] * len(_A) , type=_A)) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example __snake_case : List[str] = table_cast(_A , self.config.features.arrow_schema) return pa_table def _lowercase (self : Dict , _A : Any) -> Union[str, Any]: for file_idx, file in enumerate(itertools.chain.from_iterable(_A)): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(_A , encoding=self.config.encoding , errors=self.config.encoding_errors) as f: __snake_case : Tuple = json.load(_A) # We keep only the field we are interested in __snake_case : List[str] = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(_A , (list, tuple)): __snake_case : Optional[int] = set().union(*[row.keys() for row in dataset]) __snake_case : List[str] = {col: [row.get(_A) for row in dataset] for 
col in keys} else: __snake_case : Optional[int] = dataset __snake_case : Tuple = pa.Table.from_pydict(_A) yield file_idx, self._cast_table(_A) # If the file has one json object per line else: with open(_A , 'rb') as f: __snake_case : int = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small __snake_case : Tuple = max(self.config.chunksize // 32 , 16 << 10) __snake_case : str = ( self.config.encoding_errors if self.config.encoding_errors is not None else 'strict' ) while True: __snake_case : Union[str, Any] = f.read(self.config.chunksize) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(_A) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": __snake_case : int = batch.decode(self.config.encoding , errors=_A).encode('utf-8') try: while True: try: __snake_case : Tuple = paj.read_json( io.BytesIO(_A) , read_options=paj.ReadOptions(block_size=_A)) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(_A , pa.ArrowInvalid) and "straddling" not in str(_A) or block_size > len(_A) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( f"Batch of {len(_A)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.") block_size *= 2 except pa.ArrowInvalid as e: try: with open( _A , encoding=self.config.encoding , errors=self.config.encoding_errors) as f: __snake_case : List[Any] = json.load(_A) except json.JSONDecodeError: logger.error(f"Failed to read file '{file}' with error {type(_A)}: {e}") raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(_A , _A): # list is the only sequence type supported in JSON try: __snake_case : List[str] = set().union(*[row.keys() for row in dataset]) __snake_case : List[str] = {col: [row.get(_A) for row in dataset] for col in keys} __snake_case : List[str] = pa.Table.from_pydict(_A) except (pa.ArrowInvalid, AttributeError) as e: logger.error(f"Failed to read file '{file}' with error {type(_A)}: {e}") raise ValueError(f"Not able to read records in the JSON file at {file}.") from None yield file_idx, self._cast_table(_A) break else: logger.error(f"Failed to read file '{file}' with error {type(_A)}: {e}") raise ValueError( f"Not able to read records in the JSON file at {file}. " f"You should probably indicate the field of the JSON file containing your records. " f"This JSON file contain the following fields: {str(list(dataset.keys()))}. " f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. ") from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(_A) batch_idx += 1
172
0
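A hedged usage sketch of the `field` code path handled by the builder above, via the public `datasets` API; the file name and field name are illustrative placeholders.

from datasets import load_dataset

# "data.json" and the "data" field are hypothetical; `field` selects the list of
# records nested inside a single top-level JSON object, as in the branch above
ds = load_dataset("json", data_files="data.json", field="data")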
'''simple docstring'''

import unittest

from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        FalconForCausalLM,
        FalconForQuestionAnswering,
        FalconForSequenceClassification,
        FalconForTokenClassification,
        FalconModel,
    )


class __UpperCAmelCase :
    '''simple docstring'''

    def __init__(self : int , _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple=3 , _lowerCAmelCase : Optional[int]=7 , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : Union[str, Any]=False , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Optional[Any]=99 , _lowerCAmelCase : Dict=32 , _lowerCAmelCase : List[Any]=5 , _lowerCAmelCase : List[Any]=4 , _lowerCAmelCase : Any=37 , _lowerCAmelCase : Any="gelu" , _lowerCAmelCase : Dict=0.1 , _lowerCAmelCase : Optional[int]=0.1 , _lowerCAmelCase : List[Any]=512 , _lowerCAmelCase : Dict=16 , _lowerCAmelCase : Optional[Any]=2 , _lowerCAmelCase : Optional[Any]=0.02 , _lowerCAmelCase : Union[str, Any]=3 , _lowerCAmelCase : List[Any]=4 , _lowerCAmelCase : Optional[int]=None , ):
        A = parent
        A = batch_size
        A = seq_length
        A = is_training
        A = use_input_mask
        A = use_token_type_ids
        A = use_labels
        A = vocab_size
        A = hidden_size
        A = num_hidden_layers
        A = num_attention_heads
        A = intermediate_size
        A = hidden_act
        A = hidden_dropout_prob
        A = attention_probs_dropout_prob
        A = max_position_embeddings
        A = type_vocab_size
        A = type_sequence_label_size
        A = initializer_range
        A = num_labels
        A = num_choices
        A = scope

    def A (self : Tuple ):
        A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        A = None
        if self.use_input_mask:
            A = random_attention_mask([self.batch_size, self.seq_length] )

        A = None
        A = None
        A = None
        A = None
        if self.use_labels:
            A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            A = ids_tensor([self.batch_size] , self.num_choices )

        A = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def A (self : Optional[int] ):
        return FalconConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=__UpperCAmelCase , )

    def A (self : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : List[str] ):
        A = FalconModel(config=__UpperCAmelCase )
        model.to(__UpperCAmelCase )
        model.eval()
        A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
        A = model(__UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def A (self : Dict , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , ):
        A = True
        A = FalconModel(__UpperCAmelCase )
        model.to(__UpperCAmelCase )
        model.eval()
        A = model(
            __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , )
        A = model(
            __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , )
        A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def A (self : List[str] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , ):
        A = FalconForCausalLM(config=__UpperCAmelCase )
        model.to(__UpperCAmelCase )
        model.eval()
        A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def A (self : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] , ):
        A = True
        A = True
        A = FalconForCausalLM(config=__UpperCAmelCase )
        model.to(__UpperCAmelCase )
        model.eval()

        # first forward pass
        A = model(
            __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase , )
        A = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        A = ids_tensor((self.batch_size, 3) , config.vocab_size )
        A = ids_tensor((self.batch_size, 3) , vocab_size=2 )

        # append to next input_ids and
        A = torch.cat([input_ids, next_tokens] , dim=-1 )
        A = torch.cat([input_mask, next_mask] , dim=-1 )

        A = model(
            __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["""hidden_states"""][0]
        A = model(
            __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["""hidden_states"""][0]

        # select random slice
        A = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        A = output_from_no_past[:, -3:, random_slice_idx].detach()
        A = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) )

    def A (self : List[str] ):
        A = self.prepare_config_and_inputs()
        (
            A
        ) = config_and_inputs
        A = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict


@require_torch
class __UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    '''simple docstring'''

    __lowerCAmelCase = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    __lowerCAmelCase = (FalconForCausalLM,) if is_torch_available() else ()
    __lowerCAmelCase = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    __lowerCAmelCase = False
    __lowerCAmelCase = False

    def A (self : Tuple ):
        A = FalconModelTester(self )
        A = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 )

    def A (self : Union[str, Any] ):
        self.config_tester.run_common_tests()

    def A (self : List[Any] ):
        A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__UpperCAmelCase )

    def A (self : Union[str, Any] ):
        A = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            A = alibi
            self.model_tester.create_and_check_model(__UpperCAmelCase , *__UpperCAmelCase )

    def A (self : List[Any] ):
        A = self.model_tester.prepare_config_and_inputs_for_common()
        A = 3
        A = input_dict["""input_ids"""]
        A = input_ids.ne(1 ).to(__UpperCAmelCase )
        A = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        A = FalconForSequenceClassification(__UpperCAmelCase )
        model.to(__UpperCAmelCase )
        model.eval()
        A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def A (self : Dict ):
        A = self.model_tester.prepare_config_and_inputs_for_common()
        A = 3
        A = """single_label_classification"""
        A = input_dict["""input_ids"""]
        A = input_ids.ne(1 ).to(__UpperCAmelCase )
        A = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        A = FalconForSequenceClassification(__UpperCAmelCase )
        model.to(__UpperCAmelCase )
        model.eval()
        A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def A (self : Union[str, Any] ):
        A = self.model_tester.prepare_config_and_inputs_for_common()
        A = input_dict["""input_ids"""]
        A = FalconForCausalLM(__UpperCAmelCase )
        model.to(__UpperCAmelCase )
        model.eval()
        A = model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
        A = input_ids.shape[0]
        A = model._convert_to_rw_cache(result.past_key_values )
        A = model._convert_cache_to_standard_format(__UpperCAmelCase , __UpperCAmelCase )
        for layer in range(len(__UpperCAmelCase ) ):
            for tensor_idx in range(2 ):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )

    def A (self : List[Any] ):
        A = self.model_tester.prepare_config_and_inputs_for_common()
        A = 3
        A = """multi_label_classification"""
        A = input_dict["""input_ids"""]
        A = input_ids.ne(1 ).to(__UpperCAmelCase )
        A = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        A = FalconForSequenceClassification(__UpperCAmelCase )
        model.to(__UpperCAmelCase )
        model.eval()
        A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def A (self : Tuple ):
        # Falcon can have different numbers of KV-heads than the number of query heads, so we need
        # to override this test to use the right head counts.
        for model_class in self.all_generative_model_classes:
            A = self.model_tester.prepare_config_and_inputs_for_common()

            # If it doesn't support cache, pass the test
            if not hasattr(__UpperCAmelCase , """use_cache""" ):
                return

            A = model_class(__UpperCAmelCase ).to(__UpperCAmelCase )
            if "use_cache" not in inputs:
                A = True
            A = model(**__UpperCAmelCase )

            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return

            A = (
                getattr(__UpperCAmelCase , """decoder_layers""" , __UpperCAmelCase )
                or getattr(__UpperCAmelCase , """num_decoder_layers""" , __UpperCAmelCase )
                or config.num_hidden_layers
            )
            A = getattr(__UpperCAmelCase , """num_kv_heads""" , config.num_attention_heads )
            A = getattr(__UpperCAmelCase , """d_model""" , config.hidden_size )
            A = embed_dim // num_attention_heads

            A = outputs["""past_key_values"""]
            self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )

            A = inputs["""input_ids"""].shape
            for i in range(__UpperCAmelCase ):
                if config.new_decoder_architecture:
                    A = config.num_attention_heads
                elif config.multi_query:
                    A = 1
                self.assertEqual(len(past_kv[0] ) , 2 )  # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
                self.assertEqual(
                    past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )


@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
    '''simple docstring'''

    @slow
    def A (self : Tuple ):
        A = AutoTokenizer.from_pretrained("""Rocketknight1/falcon-rw-1b""" )
        A = FalconForCausalLM.from_pretrained("""Rocketknight1/falcon-rw-1b""" )
        model.eval()
        model.to(__UpperCAmelCase )
        A = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(__UpperCAmelCase )

        A = (
            """My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."""
        )

        A = model.generate(**__UpperCAmelCase , do_sample=__UpperCAmelCase , max_new_tokens=19 )
        A = tokenizer.batch_decode(__UpperCAmelCase )[0]

        self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )

    @slow
    def A (self : int ):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            A = AutoTokenizer.from_pretrained(__UpperCAmelCase )
            A = FalconForCausalLM.from_pretrained(__UpperCAmelCase )
            model.eval()
            model.to(__UpperCAmelCase )
            A = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(__UpperCAmelCase )

            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**__UpperCAmelCase , do_sample=__UpperCAmelCase , max_new_tokens=4 )
            model.generate(**__UpperCAmelCase , do_sample=__UpperCAmelCase , max_new_tokens=4 )
            model.generate(**__UpperCAmelCase , num_beams=2 , max_new_tokens=4 )

    @slow
    def A (self : Union[str, Any] ):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                A = AutoTokenizer.from_pretrained(__UpperCAmelCase )
                A = FalconForCausalLM.from_pretrained(__UpperCAmelCase )
                model.eval()
                model.to(device=__UpperCAmelCase )
                A = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(__UpperCAmelCase )

                # Test results are the same with and without cache
                A = model.generate(**__UpperCAmelCase , do_sample=__UpperCAmelCase , max_new_tokens=20 , use_cache=__UpperCAmelCase )
                A = model.generate(**__UpperCAmelCase , do_sample=__UpperCAmelCase , max_new_tokens=20 , use_cache=__UpperCAmelCase )
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
366
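A minimal generation sketch mirroring the slow integration test above (same checkpoint and call pattern); the decoded text is whatever the released weights produce, which the test expects to be the pizza sentence.

import torch
from transformers import AutoTokenizer, FalconForCausalLM

tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
model.eval()

inputs = tokenizer("My favorite food is", return_tensors="pt")
with torch.no_grad():
    output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
print(tokenizer.batch_decode(output_ids)[0])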
'''simple docstring'''

from collections import OrderedDict
from typing import Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging


_lowerCamelCase : int = logging.get_logger(__name__)

_lowerCamelCase : Any = {
    'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}


class __UpperCAmelCase ( A__ ):
    '''simple docstring'''

    __lowerCAmelCase = '''perceiver'''

    def __init__(self : Dict , _lowerCAmelCase : List[str]=256 , _lowerCAmelCase : Any=1280 , _lowerCAmelCase : Dict=768 , _lowerCAmelCase : List[str]=1 , _lowerCAmelCase : Optional[int]=26 , _lowerCAmelCase : Any=8 , _lowerCAmelCase : Any=8 , _lowerCAmelCase : Dict=None , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : List[Any]="kv" , _lowerCAmelCase : Optional[Any]=1 , _lowerCAmelCase : int=1 , _lowerCAmelCase : Dict="gelu" , _lowerCAmelCase : str=0.1 , _lowerCAmelCase : List[str]=0.02 , _lowerCAmelCase : Any=1e-12 , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : int=262 , _lowerCAmelCase : int=2048 , _lowerCAmelCase : int=56 , _lowerCAmelCase : List[Any]=[368, 496] , _lowerCAmelCase : List[Any]=16 , _lowerCAmelCase : Any=1920 , _lowerCAmelCase : Optional[int]=16 , _lowerCAmelCase : List[Any]=[1, 16, 224, 224] , **_lowerCAmelCase : Union[str, Any] , ):
        super().__init__(**_lowerCAmelCase )
        A = num_latents
        A = d_latents
        A = d_model
        A = num_blocks
        A = num_self_attends_per_block
        A = num_self_attention_heads
        A = num_cross_attention_heads
        A = qk_channels
        A = v_channels
        A = cross_attention_shape_for_attention
        A = self_attention_widening_factor
        A = cross_attention_widening_factor
        A = hidden_act
        A = attention_probs_dropout_prob
        A = initializer_range
        A = layer_norm_eps
        A = use_query_residual
        # masked language modeling attributes
        A = vocab_size
        A = max_position_embeddings
        # image classification attributes
        A = image_size
        # flow attributes
        A = train_size
        # multimodal autoencoding attributes
        A = num_frames
        A = audio_samples_per_frame
        A = samples_per_patch
        A = output_shape


class __UpperCAmelCase ( A__ ):
    '''simple docstring'''

    @property
    def A (self : List[str] ):
        if self.task == "multiple-choice":
            A = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            A = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""inputs""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )

    @property
    def A (self : Dict ):
        return 1e-4

    def A (self : List[Any] , _lowerCAmelCase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[TensorType] = None , _lowerCAmelCase : int = 3 , _lowerCAmelCase : int = 40 , _lowerCAmelCase : int = 40 , ):
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            A = compute_effective_axis_dimension(
                _lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            A = preprocessor.num_special_tokens_to_add(_lowerCAmelCase )
            A = compute_effective_axis_dimension(
                _lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCAmelCase )
            # Generate dummy inputs according to compute batch and sequence
            A = [""" """.join(["""a"""] ) * seq_length] * batch_size
            A = dict(preprocessor(_lowerCAmelCase , return_tensors=_lowerCAmelCase ) )
            A = inputs.pop("""input_ids""" )
            return inputs
        elif isinstance(_lowerCAmelCase , _lowerCAmelCase ) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            A = compute_effective_axis_dimension(_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch )
            A = self._generate_dummy_images(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
            A = dict(preprocessor(images=_lowerCAmelCase , return_tensors=_lowerCAmelCase ) )
            A = inputs.pop("""pixel_values""" )
            return inputs
        else:
            raise ValueError(
                """Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.""" )
337
0
import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
_A = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)


def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] ):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(__lowercase )


def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
    from transformers.testing_utils import pytest_terminal_summary_main

    __UpperCamelCase =terminalreporter.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(__lowercase , id=__lowercase )
62
'''simple docstring'''

from typing import List, Optional, Union

import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi

from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging


a__ : str = logging.get_logger(__name__)


class snake_case ( __lowerCamelCase ):
    """simple docstring"""

    SCREAMING_SNAKE_CASE_ : str = ["input_features", "attention_mask"]

    def __init__( self : Union[str, Any] , __A : Optional[int]=8_0 , __A : Tuple=1_6_0_0_0 , __A : Optional[Any]=8_0 , __A : Any=0.0 , __A : Any=True , __A : List[str]=True , __A : str=True , **__A : List[Any] , ):
        super().__init__(feature_size=__A , sampling_rate=__A , padding_value=__A , **__A )
        __UpperCamelCase = num_mel_bins
        __UpperCamelCase = do_ceptral_normalize
        __UpperCamelCase = normalize_means
        __UpperCamelCase = normalize_vars
        __UpperCamelCase = True

    def _lowerCamelCase ( self : Union[str, Any] , __A : np.ndarray , ):
        __UpperCamelCase = waveform * (2**1_5)  # Kaldi compliance: 16-bit signed integers
        __UpperCamelCase = torch.from_numpy(__A ).unsqueeze(0 )
        __UpperCamelCase = ta_kaldi.fbank(__A , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
        return features.numpy()

    @staticmethod
    def _lowerCamelCase ( __A : np.ndarray , __A : int , __A : Optional[bool] = True , __A : Optional[bool] = True , __A : float = 0.0 , ):
        # make sure we normalize float32 arrays
        if normalize_means:
            __UpperCamelCase = x[:input_length].mean(axis=0 )
            __UpperCamelCase = np.subtract(__A , __A )
        if normalize_vars:
            __UpperCamelCase = x[:input_length].std(axis=0 )
            __UpperCamelCase = np.divide(__A , __A )

        if input_length < x.shape[0]:
            __UpperCamelCase = padding_value

        # make sure array is in float32
        __UpperCamelCase = x.astype(np.floataa )

        return x

    def _lowerCamelCase ( self : int , __A : List[np.ndarray] , __A : Optional[np.ndarray] = None ):
        __UpperCamelCase = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(__A , __A , self.normalize_means , self.normalize_vars , self.padding_value )
            for x, n in zip(__A , __A )
        ]

    def __call__( self : List[Any] , __A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __A : Union[bool, str, PaddingStrategy] = False , __A : Optional[int] = None , __A : bool = False , __A : Optional[int] = None , __A : Optional[Union[str, TensorType]] = None , __A : Optional[int] = None , __A : Optional[bool] = None , **__A : Dict , ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
                    f''' {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )

        __UpperCamelCase = isinstance(__A , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
        __UpperCamelCase = is_batched_numpy or (
            isinstance(__A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )

        if is_batched:
            __UpperCamelCase = [np.asarray(__A , dtype=np.floataa ) for speech in raw_speech]
        elif not is_batched and not isinstance(__A , np.ndarray ):
            __UpperCamelCase = np.asarray(__A , dtype=np.floataa )
        elif isinstance(__A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            __UpperCamelCase = raw_speech.astype(np.floataa )

        # always return batch
        if not is_batched:
            __UpperCamelCase = [raw_speech]

        # extract fbank features
        __UpperCamelCase = [self._extract_fbank_features(__A ) for waveform in raw_speech]

        # convert into correct format for padding
        __UpperCamelCase = BatchFeature({'input_features': features} )
        __UpperCamelCase = self.pad(
            __A , padding=__A , max_length=__A , truncation=__A , pad_to_multiple_of=__A , return_attention_mask=__A , **__A , )

        # make sure list is in array format
        __UpperCamelCase = padded_inputs.get('input_features' )
        if isinstance(input_features[0] , __A ):
            __UpperCamelCase = [np.asarray(__A , dtype=np.floataa ) for feature in input_features]

        __UpperCamelCase = padded_inputs.get('attention_mask' )
        if attention_mask is not None:
            __UpperCamelCase = [np.asarray(__A , dtype=np.intaa ) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            __UpperCamelCase = (
                np.array(__A , dtype=np.intaa )
                if self._get_padding_strategies(__A , max_length=__A ) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            __UpperCamelCase = self.normalize(
                padded_inputs['input_features'] , attention_mask=__A )

        if return_tensors is not None:
            __UpperCamelCase = padded_inputs.convert_to_tensors(__A )

        return padded_inputs
53
0
from .imports import is_tqdm_available


if is_tqdm_available():
    from tqdm.auto import tqdm as _tqdm

from ..state import PartialState


def __lowercase ( a__ = True , *a__ , **a__ ) -> Dict:
    if not is_tqdm_available():
        raise ImportError('Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.' )
    __SCREAMING_SNAKE_CASE = False
    if main_process_only:
        __SCREAMING_SNAKE_CASE = PartialState().local_process_index != 0
    return _tqdm(*a__ , **a__ , disable=a__ )
118
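A hedged usage sketch of the process-aware progress-bar wrapper defined above, assuming it is re-exported as `accelerate.utils.tqdm` (the import path is an assumption). Per the signature above, the first positional argument is `main_process_only`, and the remaining arguments are forwarded to `tqdm.auto.tqdm`, so only the local main process renders the bar.

from accelerate.utils import tqdm  # assumed public re-export of the wrapper above

for _ in tqdm(True, range(100), desc="steps"):  # main_process_only=True; rest forwarded to tqdm
    pass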
import itertools
import random
import unittest

import numpy as np

from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


lowerCAmelCase__ : Dict = random.Random()

if is_torch_available():
    import torch


def __lowercase ( a__ , a__=1.0 , a__=None , a__=None ) -> Any:
    if rng is None:
        __SCREAMING_SNAKE_CASE = global_rng
    __SCREAMING_SNAKE_CASE = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values


class UpperCAmelCase_ ( unittest.TestCase ):
    '''simple docstring'''

    def __init__( self , _A , _A=7 , _A=400 , _A=2_000 , _A=1 , _A=0.0 , _A=16_000 , _A=True , _A=True , ):
        '''simple docstring'''
        __SCREAMING_SNAKE_CASE = parent
        __SCREAMING_SNAKE_CASE = batch_size
        __SCREAMING_SNAKE_CASE = min_seq_length
        __SCREAMING_SNAKE_CASE = max_seq_length
        __SCREAMING_SNAKE_CASE = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        __SCREAMING_SNAKE_CASE = feature_size
        __SCREAMING_SNAKE_CASE = padding_value
        __SCREAMING_SNAKE_CASE = sampling_rate
        __SCREAMING_SNAKE_CASE = return_attention_mask
        __SCREAMING_SNAKE_CASE = do_normalize

    def _A ( self ):
        '''simple docstring'''
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def _A ( self , _A=False , _A=False ):
        '''simple docstring'''

        def _flatten(_A ):
            return list(itertools.chain(*_A ) )

        if equal_length:
            __SCREAMING_SNAKE_CASE = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            __SCREAMING_SNAKE_CASE = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            __SCREAMING_SNAKE_CASE = [np.asarray(_A ) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class UpperCAmelCase_ ( UpperCamelCase_ , unittest.TestCase ):
    '''simple docstring'''

    UpperCamelCase__ : Optional[int] = ASTFeatureExtractor

    def _A ( self ):
        '''simple docstring'''
        __SCREAMING_SNAKE_CASE = ASTFeatureExtractionTester(self )

    def _A ( self ):
        '''simple docstring'''
        __SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        __SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
        __SCREAMING_SNAKE_CASE = [np.asarray(_A ) for speech_input in speech_inputs]

        # Test not batched input
        __SCREAMING_SNAKE_CASE = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
        __SCREAMING_SNAKE_CASE = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
        self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) )

        # Test batched
        __SCREAMING_SNAKE_CASE = feat_extract(_A , padding=_A , return_tensors='np' ).input_values
        __SCREAMING_SNAKE_CASE = feat_extract(_A , padding=_A , return_tensors='np' ).input_values
        for enc_seq_a, enc_seq_a in zip(_A , _A ):
            self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) )

        # Test 2-D numpy arrays are batched.
        __SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        __SCREAMING_SNAKE_CASE = np.asarray(_A )
        __SCREAMING_SNAKE_CASE = feat_extract(_A , return_tensors='np' ).input_values
        __SCREAMING_SNAKE_CASE = feat_extract(_A , return_tensors='np' ).input_values
        for enc_seq_a, enc_seq_a in zip(_A , _A ):
            self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) )

    @require_torch
    def _A ( self ):
        '''simple docstring'''
        import torch

        __SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __SCREAMING_SNAKE_CASE = np.random.rand(100 ).astype(np.floataa )
        __SCREAMING_SNAKE_CASE = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            __SCREAMING_SNAKE_CASE = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            __SCREAMING_SNAKE_CASE = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )

    def _A ( self , _A ):
        '''simple docstring'''
        from datasets import load_dataset

        __SCREAMING_SNAKE_CASE = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
        # automatic decoding with librispeech
        __SCREAMING_SNAKE_CASE = ds.sort('id' ).select(range(_A ) )[:num_samples]['audio']
        return [x["array"] for x in speech_samples]

    @require_torch
    def _A ( self ):
        '''simple docstring'''
        # fmt: off
        __SCREAMING_SNAKE_CASE = torch.tensor(
            [-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6,
             -1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3,
             -1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6,
             -0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] )
        # fmt: on

        __SCREAMING_SNAKE_CASE = self._load_datasamples(1 )
        __SCREAMING_SNAKE_CASE = ASTFeatureExtractor()
        __SCREAMING_SNAKE_CASE = feature_extractor(_A , return_tensors='pt' ).input_values
        self.assertEquals(input_values.shape , (1, 1_024, 128) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , _A , atol=1e-4 ) )
118
1
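A short usage sketch consistent with the shape assertion in the integration test above; the random waveform is an illustrative stand-in for real audio.

import numpy as np
from transformers import ASTFeatureExtractor

feature_extractor = ASTFeatureExtractor()
waveform = np.random.rand(16_000).astype(np.float32)  # 1 s of fake mono audio at 16 kHz
inputs = feature_extractor(waveform, sampling_rate=16_000, return_tensors="pt")
print(inputs["input_values"].shape)  # torch.Size([1, 1024, 128]) per the test above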
'''simple docstring'''

import copy
import json
import os
import tempfile

from transformers import is_torch_available

from .test_configuration_utils import config_common_kwargs


class _a ( __snake_case ):

    def __init__( self : Tuple , lowercase : int , lowercase : Union[str, Any]=None , lowercase : int=True , lowercase : Any=None , **lowercase : List[str] ):
        '''simple docstring'''
        UpperCAmelCase = parent
        UpperCAmelCase = config_class
        UpperCAmelCase = has_text_modality
        UpperCAmelCase = kwargs
        UpperCAmelCase = common_properties

    def A ( self : Union[str, Any] ):
        '''simple docstring'''
        UpperCAmelCase = self.config_class(**self.inputs_dict )
        UpperCAmelCase = (
            ["""hidden_size""", """num_attention_heads""", """num_hidden_layers"""]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(['''vocab_size'''] )

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) , msg=f"`{prop}` does not exist" )

        # Test that config has the common properties as setter
        for idx, name in enumerate(lowerCamelCase_ ):
            try:
                setattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
                self.parent.assertEqual(
                    getattr(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ , msg=f"`{name} value {idx} expected, but was {getattr(lowerCamelCase_ , lowerCamelCase_ )}" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(lowerCamelCase_ ):
            try:
                UpperCAmelCase = self.config_class(**{name: idx} )
                self.parent.assertEqual(
                    getattr(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ , msg=f"`{name} value {idx} expected, but was {getattr(lowerCamelCase_ , lowerCamelCase_ )}" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def A ( self : Optional[Any] ):
        '''simple docstring'''
        UpperCAmelCase = self.config_class(**self.inputs_dict )
        UpperCAmelCase = json.loads(config.to_json_string() )
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key] , lowerCamelCase_ )

    def A ( self : Tuple ):
        '''simple docstring'''
        UpperCAmelCase = self.config_class(**self.inputs_dict )

        with tempfile.TemporaryDirectory() as tmpdirname:
            UpperCAmelCase = os.path.join(lowerCamelCase_ , '''config.json''' )
            config_first.to_json_file(lowerCamelCase_ )
            UpperCAmelCase = self.config_class.from_json_file(lowerCamelCase_ )

        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )

    def A ( self : List[Any] ):
        '''simple docstring'''
        UpperCAmelCase = self.config_class(**self.inputs_dict )

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(lowerCamelCase_ )
            UpperCAmelCase = self.config_class.from_pretrained(lowerCamelCase_ )

        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )

    def A ( self : List[Any] ):
        '''simple docstring'''
        UpperCAmelCase = self.config_class(**self.inputs_dict )
        UpperCAmelCase = """test"""
        with tempfile.TemporaryDirectory() as tmpdirname:
            UpperCAmelCase = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
            config_first.save_pretrained(lowerCamelCase_ )
            UpperCAmelCase = self.config_class.from_pretrained(lowerCamelCase_ , subfolder=lowerCamelCase_ )

        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )

    def A ( self : int ):
        '''simple docstring'''
        UpperCAmelCase = self.config_class(**self.inputs_dict , num_labels=5 )
        self.parent.assertEqual(len(config.idalabel ) , 5 )
        self.parent.assertEqual(len(config.labelaid ) , 5 )

        UpperCAmelCase = 3
        self.parent.assertEqual(len(config.idalabel ) , 3 )
        self.parent.assertEqual(len(config.labelaid ) , 3 )

    def A ( self : List[str] ):
        '''simple docstring'''
        if self.config_class.is_composition:
            return
        UpperCAmelCase = self.config_class()
        self.parent.assertIsNotNone(lowerCamelCase_ )

    def A ( self : List[str] ):
        '''simple docstring'''
        UpperCAmelCase = copy.deepcopy(lowerCamelCase_ )
        UpperCAmelCase = self.config_class(**lowerCamelCase_ )
        UpperCAmelCase = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.floataa:
                        wrong_values.append(('''torch_dtype''', config.torch_dtype, torch.floataa) )
            elif getattr(lowerCamelCase_ , lowerCamelCase_ ) != value:
                wrong_values.append((key, getattr(lowerCamelCase_ , lowerCamelCase_ ), value) )

        if len(lowerCamelCase_ ) > 0:
            UpperCAmelCase = """\n""".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] )
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}" )

    def A ( self : Union[str, Any] ):
        '''simple docstring'''
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
34
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging


UpperCamelCase_ = logging.get_logger(__name__)

UpperCamelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}

# See all LED models at https://huggingface.co/models?filter=LED
UpperCamelCase_ = {
    '''vocab_file''': {
        '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
    },
    '''merges_file''': {
        '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
    },
    '''tokenizer_file''': {
        '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
    },
}

UpperCamelCase_ = {
    '''allenai/led-base-16384''': 16384,
}


@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowerCamelCase_ ( ):
    '''simple docstring'''
    UpperCAmelCase_ : int = (
        list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
    )
    UpperCAmelCase_ : Dict = bs[:]
    UpperCAmelCase_ : Any = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(_a )
            cs.append(2**8 + n )
            n += 1
    UpperCAmelCase_ : Any = [chr(_a ) for n in cs]
    return dict(zip(_a , _a ) )


def lowerCamelCase_ ( _a : List[str] ):
    '''simple docstring'''
    UpperCAmelCase_ : Union[str, Any] = set()
    UpperCAmelCase_ : List[Any] = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        UpperCAmelCase_ : Optional[int] = char
    return pairs


class _snake_case ( __snake_case ):
    '''simple docstring'''

    A__ : str = VOCAB_FILES_NAMES
    A__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
    A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    A__ : Optional[int] = ["input_ids", "attention_mask"]

    def __init__( self: Union[str, Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any]="replace" ,lowerCamelCase_: Optional[Any]="<s>" ,lowerCamelCase_: List[Any]="</s>" ,lowerCamelCase_: List[str]="</s>" ,lowerCamelCase_: int="<s>" ,lowerCamelCase_: int="<unk>" ,lowerCamelCase_: str="<pad>" ,lowerCamelCase_: Optional[Any]="<mask>" ,lowerCamelCase_: List[str]=False ,**lowerCamelCase_: Tuple ,) -> Any:
        UpperCAmelCase_ : Union[str, Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else bos_token
        UpperCAmelCase_ : int = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else eos_token
        UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else sep_token
        UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else cls_token
        UpperCAmelCase_ : Optional[Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else unk_token
        UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        UpperCAmelCase_ : str = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else mask_token

        super().__init__(
            errors=lowerCamelCase_ ,bos_token=lowerCamelCase_ ,eos_token=lowerCamelCase_ ,unk_token=lowerCamelCase_ ,sep_token=lowerCamelCase_ ,cls_token=lowerCamelCase_ ,pad_token=lowerCamelCase_ ,mask_token=lowerCamelCase_ ,add_prefix_space=lowerCamelCase_ ,**lowerCamelCase_ ,)

        with open(lowerCamelCase_ ,encoding="""utf-8""" ) as vocab_handle:
            UpperCAmelCase_ : Union[str, Any] = json.load(lowerCamelCase_ )
        UpperCAmelCase_ : Optional[int] = {v: k for k, v in self.encoder.items()}
        UpperCAmelCase_ : Any = errors  # how to handle errors in decoding
        UpperCAmelCase_ : int = bytes_to_unicode()
        UpperCAmelCase_ : Dict = {v: k for k, v in self.byte_encoder.items()}
        with open(lowerCamelCase_ ,encoding="""utf-8""" ) as merges_handle:
            UpperCAmelCase_ : Any = merges_handle.read().split("""\n""" )[1:-1]
        UpperCAmelCase_ : int = [tuple(merge.split() ) for merge in bpe_merges]
        UpperCAmelCase_ : Union[str, Any] = dict(zip(lowerCamelCase_ ,range(len(lowerCamelCase_ ) ) ) )
        UpperCAmelCase_ : Tuple = {}
        UpperCAmelCase_ : Optional[int] = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        UpperCAmelCase_ : int = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def A__ ( self: List[str] ) -> List[str]:
        return len(self.encoder )

    def A__ ( self: Any ) -> Union[str, Any]:
        return dict(self.encoder ,**self.added_tokens_encoder )

    def A__ ( self: Tuple ,lowerCamelCase_: Dict ) -> Optional[Any]:
        if token in self.cache:
            return self.cache[token]
        UpperCAmelCase_ : Union[str, Any] = tuple(lowerCamelCase_ )
        UpperCAmelCase_ : Union[str, Any] = get_pairs(lowerCamelCase_ )

        if not pairs:
            return token

        while True:
            UpperCAmelCase_ : Union[str, Any] = min(lowerCamelCase_ ,key=lambda lowerCamelCase_ : self.bpe_ranks.get(lowerCamelCase_ ,float("""inf""" ) ) )
            if bigram not in self.bpe_ranks:
                break
            UpperCAmelCase_ , UpperCAmelCase_ : Any = bigram
            UpperCAmelCase_ : Optional[Any] = []
            UpperCAmelCase_ : List[str] = 0
            while i < len(lowerCamelCase_ ):
                try:
                    UpperCAmelCase_ : str = word.index(lowerCamelCase_ ,lowerCamelCase_ )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    UpperCAmelCase_ : Union[str, Any] = j

                if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            UpperCAmelCase_ : List[str] = tuple(lowerCamelCase_ )
            UpperCAmelCase_ : List[Any] = new_word
            if len(lowerCamelCase_ ) == 1:
                break
            else:
                UpperCAmelCase_ : List[str] = get_pairs(lowerCamelCase_ )
        UpperCAmelCase_ : int = """ """.join(lowerCamelCase_ )
        UpperCAmelCase_ : Optional[Any] = word
        return word

    def A__ ( self: Union[str, Any] ,lowerCamelCase_: Tuple ) -> List[str]:
        UpperCAmelCase_ : str = []
        for token in re.findall(self.pat ,lowerCamelCase_ ):
            UpperCAmelCase_ : List[Any] = """""".join(
                self.byte_encoder[b] for b in token.encode("""utf-8""" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_ ).split(""" """ ) )
        return bpe_tokens

    def A__ ( self: List[Any] ,lowerCamelCase_: Optional[Any] ) -> Optional[int]:
        return self.encoder.get(lowerCamelCase_ ,self.encoder.get(self.unk_token ) )

    def A__ ( self: List[str] ,lowerCamelCase_: str ) -> Optional[Any]:
        return self.decoder.get(lowerCamelCase_ )

    def A__ ( self: List[str] ,lowerCamelCase_: List[str] ) -> List[Any]:
        UpperCAmelCase_ : str = """""".join(lowerCamelCase_ )
        UpperCAmelCase_ : int = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors )
        return text

    def A__ ( self: Optional[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(lowerCamelCase_ ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        UpperCAmelCase_ : List[Any] = os.path.join(
            lowerCamelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        UpperCAmelCase_ : List[str] = os.path.join(
            lowerCamelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )

        with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=lowerCamelCase_ ,ensure_ascii=lowerCamelCase_ ) + """\n""" )

        UpperCAmelCase_ : str = 0
        with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as writer:
            writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda lowerCamelCase_ : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        """ Please check that the tokenizer is not corrupted!""" )
                    UpperCAmelCase_ : Tuple = token_index
                writer.write(""" """.join(lowerCamelCase_ ) + """\n""" )
                index += 1

        return vocab_file, merge_file

    def A__ ( self: str ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ) -> List[int]:
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        UpperCAmelCase_ : int = [self.cls_token_id]
        UpperCAmelCase_ : Optional[int] = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def A__ ( self: Union[str, Any] ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ,lowerCamelCase_: bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowerCamelCase_ ,token_ids_a=lowerCamelCase_ ,already_has_special_tokens=lowerCamelCase_ )

        if token_ids_a is None:
            return [1] + ([0] * len(lowerCamelCase_ )) + [1]
        return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1]

    def A__ ( self: str ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ) -> List[int]:
        UpperCAmelCase_ : Optional[Any] = [self.sep_token_id]
        UpperCAmelCase_ : Optional[Any] = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def A__ ( self: Optional[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: str=False ,**lowerCamelCase_: List[str] ) -> Optional[int]:
        UpperCAmelCase_ : Optional[int] = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_ ) > 0 and not text[0].isspace()):
            UpperCAmelCase_ : Dict = """ """ + text
        return (text, kwargs)

    def A__ ( self: List[str] ,lowerCamelCase_: Union[Dict[str, EncodedInput], BatchEncoding] ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: PaddingStrategy = PaddingStrategy.DO_NOT_PAD ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: Optional[bool] = None ,) -> dict:
        UpperCAmelCase_ : Optional[int] = super()._pad(
            encoded_inputs=lowerCamelCase_ ,max_length=lowerCamelCase_ ,padding_strategy=lowerCamelCase_ ,pad_to_multiple_of=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,)

        # Load from model defaults
        if return_attention_mask is None:
            UpperCAmelCase_ : str = """attention_mask""" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            UpperCAmelCase_ : str = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            UpperCAmelCase_ : List[Any] = len(encoded_inputs["""global_attention_mask"""] ) != len(lowerCamelCase_ )

            if needs_to_be_padded:
                UpperCAmelCase_ : Dict = len(lowerCamelCase_ ) - len(encoded_inputs["""global_attention_mask"""] )

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    UpperCAmelCase_ : str = (
                        encoded_inputs["""global_attention_mask"""] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    UpperCAmelCase_ : List[str] = [-1] * difference + encoded_inputs[
                        """global_attention_mask"""
                    ]
                else:
                    raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )

        return encoded_inputs
345
0
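The `_pad` override above carries one model-specific rule worth calling out: `global_attention_mask` is padded with `-1`, because `0` already encodes "local attention" and cannot double as a padding value. A minimal standalone sketch of just that rule; the helper name and example values are invented for illustration:

def pad_global_attention_mask(global_mask, target_length, padding_side="right"):
    # Pad with -1: in a global attention mask, 0 means "local attention",
    # so 0 cannot be reused as the padding value.
    difference = target_length - len(global_mask)
    if difference <= 0:
        return global_mask
    if padding_side == "right":
        return global_mask + [-1] * difference
    if padding_side == "left":
        return [-1] * difference + global_mask
    raise ValueError("Invalid padding strategy:" + str(padding_side))


print(pad_global_attention_mask([1, 0, 0], 5))           # [1, 0, 0, -1, -1]
print(pad_global_attention_mask([1, 0, 0], 5, "left"))   # [-1, -1, 1, 0, 0]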
import argparse

from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
140
import math


def prime_sieve(n: int) -> list:
    """Sieve of Eratosthenes: return the primes below `n` (2 plus the odd primes)."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 9999_6666_3333) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
140
1
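A quick sanity check of the `prime_sieve` helper above (an added snippet, not part of the dataset row; the expected list is simply the primes below 30):

assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]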
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
85
"""simple docstring""" import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): lowerCAmelCase_ : Union[str, Any] = inspect.getfile(accelerate.test_utils ) lowerCAmelCase_ : Dict = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] ) lowerCAmelCase_ : int = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] ) lowerCAmelCase_ : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] ) @require_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : Any ): print(F"Found {torch.cuda.device_count()} devices." ) lowerCAmelCase_ : Optional[int] = ['torchrun', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() ) @require_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): print(F"Found {torch.cuda.device_count()} devices." ) lowerCAmelCase_ : int = ['torchrun', F"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path] print(F"Command: {cmd}" ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() ) @require_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): lowerCAmelCase_ : List[Any] = ['torchrun', F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() ) @require_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): print(F"Found {torch.cuda.device_count()} devices, using 2 devices only" ) lowerCAmelCase_ : Any = ['torchrun', F"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices='0,1' ): execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() ) if __name__ == "__main__": lowercase__ : Dict = Accelerator() lowercase__ : List[Any] = (accelerator.state.process_index + 2, 1_0) lowercase__ : Any = torch.randint(0, 1_0, shape).to(accelerator.device) lowercase__ : List[Any] = """""" lowercase__ : Union[str, Any] = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." lowercase__ : Optional[Any] = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." lowercase__ : Any = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." 
# Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
224
0
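Two properties of the `gen_gaussian_kernel` helper above are easy to verify: the kernel peaks at its center cell and is symmetric under transposition. A short added check (not part of the original row):

import numpy as np

kernel = gen_gaussian_kernel(3, sigma=1)
assert kernel.shape == (3, 3)
assert kernel[1, 1] == kernel.max()   # the center cell is the peak
assert np.allclose(kernel, kernel.T)  # symmetric in x and y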
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"


class FNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
352
'''simple docstring''' import unittest from transformers import DebertaVaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaVaForMaskedLM, DebertaVaForMultipleChoice, DebertaVaForQuestionAnswering, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaModel, ) from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST class A_ ( lowerCAmelCase_ ): def __init__( self : Optional[int] , snake_case_ : Dict , snake_case_ : int=1_3 , snake_case_ : int=7 , snake_case_ : Union[str, Any]=True , snake_case_ : int=True , snake_case_ : Tuple=True , snake_case_ : Optional[Any]=True , snake_case_ : int=9_9 , snake_case_ : Tuple=3_2 , snake_case_ : Dict=5 , snake_case_ : str=4 , snake_case_ : Union[str, Any]=3_7 , snake_case_ : Dict="gelu" , snake_case_ : Any=0.1 , snake_case_ : Optional[Any]=0.1 , snake_case_ : List[Any]=5_1_2 , snake_case_ : List[Any]=1_6 , snake_case_ : List[Any]=2 , snake_case_ : Any=0.0_2 , snake_case_ : List[str]=False , snake_case_ : Dict=True , snake_case_ : Union[str, Any]="None" , snake_case_ : Dict=3 , snake_case_ : Union[str, Any]=4 , snake_case_ : Dict=None , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = relative_attention _UpperCAmelCase = position_biased_input _UpperCAmelCase = pos_att_type _UpperCAmelCase = scope def lowercase ( self : Optional[Any] ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowercase ( self : Optional[Any] ): return DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def lowercase ( self : Optional[int] , snake_case_ : Dict ): self.parent.assertListEqual(list(result.loss.size() ) , [] ) def lowercase ( self : str , snake_case_ : Optional[int] , snake_case_ : str , snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : Optional[Any] , snake_case_ : int , snake_case_ : List[str] ): _UpperCAmelCase = DebertaVaModel(config=snake_case_ ) model.to(snake_case_ ) model.eval() _UpperCAmelCase = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )[0] _UpperCAmelCase = model(snake_case_ , token_type_ids=snake_case_ )[0] _UpperCAmelCase = model(snake_case_ )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def lowercase ( self : Optional[int] , snake_case_ : str , snake_case_ : int , snake_case_ : List[str] , snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : List[Any] ): _UpperCAmelCase = DebertaVaForMaskedLM(config=snake_case_ ) model.to(snake_case_ ) model.eval() _UpperCAmelCase = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase ( self : Optional[int] , snake_case_ : List[Any] , snake_case_ : Optional[Any] , snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : Optional[int] ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = DebertaVaForSequenceClassification(snake_case_ ) model.to(snake_case_ ) model.eval() _UpperCAmelCase = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(snake_case_ ) def lowercase ( self : Any , snake_case_ : List[Any] , snake_case_ : Union[str, Any] , snake_case_ : int , snake_case_ : List[str] , snake_case_ : List[str] , snake_case_ : Any , snake_case_ : List[Any] ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = DebertaVaForTokenClassification(config=snake_case_ ) model.to(snake_case_ ) model.eval() _UpperCAmelCase = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowercase ( self : List[Any] , snake_case_ : int , snake_case_ : List[Any] , snake_case_ : Optional[int] , snake_case_ : Optional[int] , snake_case_ : int , snake_case_ : Optional[Any] , snake_case_ : Any ): _UpperCAmelCase = DebertaVaForQuestionAnswering(config=snake_case_ ) model.to(snake_case_ ) model.eval() _UpperCAmelCase = model( snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase ( self : Optional[Any] , snake_case_ : str , snake_case_ : Dict , snake_case_ : Dict , snake_case_ : 
Optional[int] , snake_case_ : Optional[int] , snake_case_ : int , snake_case_ : str ): _UpperCAmelCase = DebertaVaForMultipleChoice(config=snake_case_ ) model.to(snake_case_ ) model.eval() _UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = model( snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowercase ( self : str ): _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class A_ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _lowerCamelCase : List[str] = ( ( DebertaVaModel, DebertaVaForMaskedLM, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaForQuestionAnswering, DebertaVaForMultipleChoice, ) if is_torch_available() else () ) _lowerCamelCase : Tuple = ( { """feature-extraction""": DebertaVaModel, """fill-mask""": DebertaVaForMaskedLM, """question-answering""": DebertaVaForQuestionAnswering, """text-classification""": DebertaVaForSequenceClassification, """token-classification""": DebertaVaForTokenClassification, """zero-shot""": DebertaVaForSequenceClassification, } if is_torch_available() else {} ) _lowerCamelCase : Any = True _lowerCamelCase : int = False _lowerCamelCase : int = False _lowerCamelCase : Dict = False _lowerCamelCase : int = False def lowercase ( self : List[str] ): _UpperCAmelCase = DebertaVaModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=snake_case_ , hidden_size=3_7 ) def lowercase ( self : str ): self.config_tester.run_common_tests() def lowercase ( self : Union[str, Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*snake_case_ ) def lowercase ( self : List[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*snake_case_ ) def lowercase ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*snake_case_ ) def lowercase ( self : Dict ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*snake_case_ ) def lowercase ( self : List[str] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*snake_case_ ) def lowercase ( self : Optional[int] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_multiple_choice(*snake_case_ ) @slow def lowercase ( self : Union[str, Any] ): for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = DebertaVaModel.from_pretrained(snake_case_ ) self.assertIsNotNone(snake_case_ ) @require_torch @require_sentencepiece @require_tokenizers class A_ ( unittest.TestCase ): @unittest.skip(reason="Model not available 
yet" ) def lowercase ( self : Union[str, Any] ): pass @slow def lowercase ( self : List[str] ): _UpperCAmelCase = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge" ) _UpperCAmelCase = torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] ) _UpperCAmelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): _UpperCAmelCase = model(snake_case_ , attention_mask=snake_case_ )[0] # compare the actual values for a slice. _UpperCAmelCase = torch.tensor( [[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case_ , atol=1e-4 ) , f'{output[:, 1:4, 1:4]}' )
156
0
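The two FNet tokenizer methods above encode the standard `[CLS] A [SEP] B [SEP]` pair layout with segment ids 0 and 1. An added, illustrative sketch with made-up token ids (not FNet's real vocabulary):

cls_id, sep_id = 101, 102            # illustrative ids only
ids_a, ids_b = [7, 8, 9], [4, 5]

input_ids = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]
token_type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)

assert len(input_ids) == len(token_type_ids)  # 8 positions each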
'''simple docstring''' import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 __snake_case =sys.version_info >= (3, 10) def a_ ( lowerCamelCase : List[Any]=None , lowerCamelCase : Tuple=None ): return field(default_factory=lambda: default , metadata=lowerCamelCase ) @dataclass class UpperCAmelCase_ : lowerCamelCase : int lowerCamelCase : float lowerCamelCase : str lowerCamelCase : bool @dataclass class UpperCAmelCase_ : lowerCamelCase : int = 42 lowerCamelCase : str = field(default='''toto''' , metadata={'''help''': '''help message'''} ) @dataclass class UpperCAmelCase_ : lowerCamelCase : bool = False lowerCamelCase : bool = True lowerCamelCase : Optional[bool] = None class UpperCAmelCase_ ( __lowercase ): lowerCamelCase : List[Any] = '''titi''' lowerCamelCase : List[str] = '''toto''' class UpperCAmelCase_ ( __lowercase ): lowerCamelCase : List[str] = '''titi''' lowerCamelCase : Any = '''toto''' lowerCamelCase : Union[str, Any] = 42 @dataclass class UpperCAmelCase_ : lowerCamelCase : BasicEnum = "toto" def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]: lowerCAmelCase = BasicEnum(self.foo ) @dataclass class UpperCAmelCase_ : lowerCamelCase : MixedTypeEnum = "toto" def __UpperCAmelCase ( self : int ) -> Dict: lowerCAmelCase = MixedTypeEnum(self.foo ) @dataclass class UpperCAmelCase_ : lowerCamelCase : Optional[int] = None lowerCamelCase : Optional[float] = field(default=__lowercase , metadata={'''help''': '''help message'''} ) lowerCamelCase : Optional[str] = None lowerCamelCase : Optional[List[str]] = list_field(default=[] ) lowerCamelCase : Optional[List[int]] = list_field(default=[] ) @dataclass class UpperCAmelCase_ : lowerCamelCase : List[int] = list_field(default=[] ) lowerCamelCase : List[int] = list_field(default=[1, 2, 3] ) lowerCamelCase : List[str] = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] ) lowerCamelCase : List[float] = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class UpperCAmelCase_ : lowerCamelCase : List[int] = field() lowerCamelCase : str = field() lowerCamelCase : BasicEnum = field() def __UpperCAmelCase ( self : List[str] ) -> Optional[int]: lowerCAmelCase = BasicEnum(self.required_enum ) @dataclass class UpperCAmelCase_ : lowerCamelCase : int lowerCamelCase : "BasicEnum" = field() lowerCamelCase : "Optional[bool]" = None lowerCamelCase : "str" = field(default='''toto''' , metadata={'''help''': '''help message'''} ) lowerCamelCase : "List[str]" = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] ) if is_python_no_less_than_3_10: @dataclass class UpperCAmelCase_ : lowerCamelCase : bool = False lowerCamelCase : bool = True lowerCamelCase : bool | None = None @dataclass class UpperCAmelCase_ : lowerCamelCase : int | None = None lowerCamelCase : float | None = field(default=__lowercase , metadata={'''help''': '''help message'''} ) lowerCamelCase : str | None = None lowerCamelCase : list[str] | None = list_field(default=[] ) lowerCamelCase : list[int] | None = list_field(default=[] ) class UpperCAmelCase_ ( unittest.TestCase ): def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : 
argparse.ArgumentParser , UpperCAmelCase__ : argparse.ArgumentParser ) -> Dict: self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): lowerCAmelCase = {k: v for k, v in vars(UpperCAmelCase__ ).items() if k != 'container'} lowerCAmelCase = {k: v for k, v in vars(UpperCAmelCase__ ).items() if k != 'container'} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('choices' , UpperCAmelCase__ ) and yy.get('choices' , UpperCAmelCase__ ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['type'](UpperCAmelCase__ ) , yy['type'](UpperCAmelCase__ ) ) del xx["type"], yy["type"] self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def __UpperCAmelCase ( self : List[Any] ) -> Tuple: lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ ) lowerCAmelCase = argparse.ArgumentParser() expected.add_argument('--foo' , type=UpperCAmelCase__ , required=UpperCAmelCase__ ) expected.add_argument('--bar' , type=UpperCAmelCase__ , required=UpperCAmelCase__ ) expected.add_argument('--baz' , type=UpperCAmelCase__ , required=UpperCAmelCase__ ) expected.add_argument('--flag' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , const=UpperCAmelCase__ , nargs='?' ) self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = ['--foo', '1', '--baz', 'quux', '--bar', '0.5'] ((lowerCAmelCase) , ) = parser.parse_args_into_dataclasses(UpperCAmelCase__ , look_for_args_file=UpperCAmelCase__ ) self.assertFalse(example.flag ) def __UpperCAmelCase ( self : int ) -> int: lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ ) lowerCAmelCase = argparse.ArgumentParser() expected.add_argument('--foo' , default=4_2 , type=UpperCAmelCase__ ) expected.add_argument('--baz' , default='toto' , type=UpperCAmelCase__ , help='help message' ) self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def __UpperCAmelCase ( self : Any ) -> str: lowerCAmelCase = argparse.ArgumentParser() expected.add_argument('--foo' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , const=UpperCAmelCase__ , nargs='?' ) expected.add_argument('--baz' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , const=UpperCAmelCase__ , nargs='?' 
) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('--no_baz' , action='store_false' , default=UpperCAmelCase__ , dest='baz' ) expected.add_argument('--opt' , type=UpperCAmelCase__ , default=UpperCAmelCase__ ) lowerCAmelCase = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(UpperCAmelCase__ ) for dataclass_type in dataclass_types: lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ ) self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = parser.parse_args([] ) self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) ) lowerCAmelCase = parser.parse_args(['--foo', '--no_baz'] ) self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) ) lowerCAmelCase = parser.parse_args(['--foo', '--baz'] ) self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) ) lowerCAmelCase = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'] ) self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) ) lowerCAmelCase = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'] ) self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) ) def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]: lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ ) lowerCAmelCase = argparse.ArgumentParser() expected.add_argument( '--foo' , default='toto' , choices=['titi', 'toto', 4_2] , type=make_choice_type_function(['titi', 'toto', 4_2] ) , ) self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = parser.parse_args([] ) self.assertEqual(args.foo , 'toto' ) lowerCAmelCase = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) lowerCAmelCase = parser.parse_args(['--foo', 'titi'] ) self.assertEqual(args.foo , 'titi' ) lowerCAmelCase = parser.parse_args_into_dataclasses(['--foo', 'titi'] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) lowerCAmelCase = parser.parse_args(['--foo', '42'] ) self.assertEqual(args.foo , 4_2 ) lowerCAmelCase = parser.parse_args_into_dataclasses(['--foo', '42'] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def __UpperCAmelCase ( self : int ) -> Dict: @dataclass class UpperCAmelCase_ : lowerCamelCase : Literal["titi", "toto", 42] = "toto" lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ ) lowerCAmelCase = argparse.ArgumentParser() expected.add_argument( '--foo' , default='toto' , choices=('titi', 'toto', 4_2) , type=make_choice_type_function(['titi', 'toto', 4_2] ) , ) self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = parser.parse_args([] ) self.assertEqual(args.foo , 'toto' ) lowerCAmelCase = parser.parse_args(['--foo', 'titi'] ) self.assertEqual(args.foo , 'titi' ) lowerCAmelCase = parser.parse_args(['--foo', '42'] ) self.assertEqual(args.foo , 4_2 ) def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]: lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ ) lowerCAmelCase = argparse.ArgumentParser() expected.add_argument('--foo_int' , nargs='+' , default=[] , type=UpperCAmelCase__ ) expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=UpperCAmelCase__ ) expected.add_argument('--foo_str' , nargs='+' , 
default=['Hallo', 'Bonjour', 'Hello'] , type=UpperCAmelCase__ ) expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=UpperCAmelCase__ ) self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = parser.parse_args([] ) self.assertEqual( UpperCAmelCase__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3] ) , ) lowerCAmelCase = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split() ) self.assertEqual(UpperCAmelCase__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7] ) ) def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]: lowerCAmelCase = argparse.ArgumentParser() expected.add_argument('--foo' , default=UpperCAmelCase__ , type=UpperCAmelCase__ ) expected.add_argument('--bar' , default=UpperCAmelCase__ , type=UpperCAmelCase__ , help='help message' ) expected.add_argument('--baz' , default=UpperCAmelCase__ , type=UpperCAmelCase__ ) expected.add_argument('--ces' , nargs='+' , default=[] , type=UpperCAmelCase__ ) expected.add_argument('--des' , nargs='+' , default=[] , type=UpperCAmelCase__ ) lowerCAmelCase = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(UpperCAmelCase__ ) for dataclass_type in dataclass_types: lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ ) self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = parser.parse_args([] ) self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , bar=UpperCAmelCase__ , baz=UpperCAmelCase__ , ces=[] , des=[] ) ) lowerCAmelCase = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split() ) self.assertEqual(UpperCAmelCase__ , Namespace(foo=1_2 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3] ) ) def __UpperCAmelCase ( self : Any ) -> List[str]: lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ ) lowerCAmelCase = argparse.ArgumentParser() expected.add_argument('--required_list' , nargs='+' , type=UpperCAmelCase__ , required=UpperCAmelCase__ ) expected.add_argument('--required_str' , type=UpperCAmelCase__ , required=UpperCAmelCase__ ) expected.add_argument( '--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=UpperCAmelCase__ , ) self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def __UpperCAmelCase ( self : Dict ) -> Optional[Any]: lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ ) lowerCAmelCase = argparse.ArgumentParser() expected.add_argument('--foo' , type=UpperCAmelCase__ , required=UpperCAmelCase__ ) expected.add_argument( '--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=UpperCAmelCase__ , ) expected.add_argument('--opt' , type=UpperCAmelCase__ , default=UpperCAmelCase__ ) expected.add_argument('--baz' , default='toto' , type=UpperCAmelCase__ , help='help message' ) expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=UpperCAmelCase__ ) self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def __UpperCAmelCase ( self : Union[str, Any] ) -> Any: lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ ) lowerCAmelCase = { 'foo': 1_2, 'bar': 3.14, 'baz': '42', 'flag': True, } lowerCAmelCase = parser.parse_dict(UpperCAmelCase__ )[0] lowerCAmelCase = BasicExample(**UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def __UpperCAmelCase ( self : List[Any] ) -> List[Any]: lowerCAmelCase = 
HfArgumentParser(UpperCAmelCase__ ) lowerCAmelCase = { 'foo': 1_2, 'bar': 3.14, 'baz': '42', 'flag': True, 'extra': 4_2, } self.assertRaises(UpperCAmelCase__ , parser.parse_dict , UpperCAmelCase__ , allow_extra_keys=UpperCAmelCase__ ) def __UpperCAmelCase ( self : int ) -> List[str]: lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ ) lowerCAmelCase = { 'foo': 1_2, 'bar': 3.14, 'baz': '42', 'flag': True, } with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase = os.path.join(UpperCAmelCase__ , 'temp_json' ) os.mkdir(UpperCAmelCase__ ) with open(temp_local_path + '.json' , 'w+' ) as f: json.dump(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '.json' ) )[0] lowerCAmelCase = BasicExample(**UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]: lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ ) lowerCAmelCase = { 'foo': 1_2, 'bar': 3.14, 'baz': '42', 'flag': True, } with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase = os.path.join(UpperCAmelCase__ , 'temp_yaml' ) os.mkdir(UpperCAmelCase__ ) with open(temp_local_path + '.yaml' , 'w+' ) as f: yaml.dump(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '.yaml' ) )[0] lowerCAmelCase = BasicExample(**UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def __UpperCAmelCase ( self : Any ) -> int: lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ )
4
import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset

from transformers import AutoTokenizer, HfArgumentParser


def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
4
1
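Both cells in the row above revolve around `HfArgumentParser`. For orientation, a minimal, hedged usage sketch (the dataclass and its field names are invented):

from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class ExampleArguments:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


parser = HfArgumentParser(ExampleArguments)
(example_args,) = parser.parse_args_into_dataclasses(["--foo", "12", "--baz", "quux"])
assert example_args.foo == 12 and example_args.baz == "quux"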
"""simple docstring""" from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) _lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name _lowerCAmelCase : List[str] = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")\n >>> pipe_prior.to(\"cuda\")\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")\n >>> pipe.to(\"cuda\")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save(\"cat.png\")\n ```\n" def __snake_case ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple=8 ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase : Any = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 _UpperCAmelCase : List[Any] = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class UpperCAmelCase_ ( _UpperCamelCase ): def __init__( self : List[Any] , A : UNetaDConditionModel , A : DDPMScheduler , A : VQModel , ): super().__init__() self.register_modules( unet=A , scheduler=A , movq=A , ) _UpperCAmelCase : Optional[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1) def snake_case_ ( self : int , A : List[str] , A : Tuple , A : Dict , A : Any , A : Optional[int] , A : List[Any] ): if latents is None: _UpperCAmelCase : int = randn_tensor(A , generator=A , device=A , dtype=A ) else: if latents.shape != shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' ) _UpperCAmelCase : Union[str, Any] = latents.to(A ) _UpperCAmelCase : List[str] = latents * scheduler.init_noise_sigma return latents def snake_case_ ( self : Dict , A : Union[str, Any]=0 ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) _UpperCAmelCase : List[Any] = torch.device(f'cuda:{gpu_id}' ) _UpperCAmelCase : int = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(A , A ) def snake_case_ ( self : int , A : str=0 ): if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." 
) _UpperCAmelCase : Optional[int] = torch.device(f'cuda:{gpu_id}' ) if self.device.type != "cpu": self.to("cpu" , silence_dtype_warnings=A ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) _UpperCAmelCase : str = None for cpu_offloaded_model in [self.unet, self.movq]: _UpperCAmelCase : List[str] = cpu_offload_with_hook(A , A , prev_module_hook=A ) # We'll offload the last model manually. _UpperCAmelCase : str = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def snake_case_ ( self : Optional[Any] ): if not hasattr(self.unet , "_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(A , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(A ) def __call__( self : Optional[Any] , A : Union[torch.FloatTensor, List[torch.FloatTensor]] , A : Union[torch.FloatTensor, List[torch.FloatTensor]] , A : int = 5_1_2 , A : int = 5_1_2 , A : int = 1_0_0 , A : float = 4.0 , A : int = 1 , A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , A : Optional[torch.FloatTensor] = None , A : Optional[str] = "pil" , A : bool = True , ): _UpperCAmelCase : Optional[Any] = self._execution_device _UpperCAmelCase : str = guidance_scale > 1.0 if isinstance(A , A ): _UpperCAmelCase : Tuple = torch.cat(A , dim=0 ) _UpperCAmelCase : Union[str, Any] = image_embeds.shape[0] * num_images_per_prompt if isinstance(A , A ): _UpperCAmelCase : List[Any] = torch.cat(A , dim=0 ) if do_classifier_free_guidance: _UpperCAmelCase : Union[str, Any] = image_embeds.repeat_interleave(A , dim=0 ) _UpperCAmelCase : List[Any] = negative_image_embeds.repeat_interleave(A , dim=0 ) _UpperCAmelCase : Any = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=A ) self.scheduler.set_timesteps(A , device=A ) _UpperCAmelCase : List[str] = self.scheduler.timesteps _UpperCAmelCase : List[Any] = self.unet.config.in_channels _UpperCAmelCase : Dict = downscale_height_and_width(A , A , self.movq_scale_factor ) # create initial latent _UpperCAmelCase : Optional[int] = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , A , A , A , self.scheduler , ) for i, t in enumerate(self.progress_bar(A ) ): # expand the latents if we are doing classifier free guidance _UpperCAmelCase : Any = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _UpperCAmelCase : Any = {"image_embeds": image_embeds} _UpperCAmelCase : Dict = self.unet( sample=A , timestep=A , encoder_hidden_states=A , added_cond_kwargs=A , return_dict=A , )[0] if do_classifier_free_guidance: _UpperCAmelCase : Dict = noise_pred.split(latents.shape[1] , dim=1 ) _UpperCAmelCase : Optional[Any] = noise_pred.chunk(2 ) _UpperCAmelCase : Any = variance_pred.chunk(2 ) _UpperCAmelCase : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) _UpperCAmelCase : int = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , "variance_type" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): _UpperCAmelCase : str = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 _UpperCAmelCase : Optional[Any] = self.scheduler.step( A , A , A , generator=A , 
)[0] # post-processing _UpperCAmelCase : Union[str, Any] = self.movq.decode(A , force_not_quantize=A )["sample"] if output_type not in ["pt", "np", "pil"]: raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' ) if output_type in ["np", "pil"]: _UpperCAmelCase : str = image * 0.5 + 0.5 _UpperCAmelCase : Dict = image.clamp(0 , 1 ) _UpperCAmelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": _UpperCAmelCase : Any = self.numpy_to_pil(A ) if not return_dict: return (image,) return ImagePipelineOutput(images=A )
366
"""simple docstring""" from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo _lowerCAmelCase : Tuple = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n" _lowerCAmelCase : Optional[int] = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n" _lowerCAmelCase : Any = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 
'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 
'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase_ ( datasets.Metric ): def snake_case_ ( self : List[str] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ), } ) , ) def snake_case_ ( self : str , A : List[List[List[str]]] , A : List[List[str]] , A : int = 1 , A : int = 4 , ): return { "google_bleu": gleu_score.corpus_gleu( list_of_references=A , hypotheses=A , min_len=A , max_len=A ) }
202
0
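The denoising loop in the pipeline above combines the unconditional and text-conditioned noise predictions with a guidance scale (classifier-free guidance). An added sketch of just that arithmetic, using random stand-in tensors:

import torch

guidance_scale = 4.0
noise_pred = torch.randn(2, 4, 8, 8)  # [uncond, text] stacked along the batch dim
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert guided.shape == (1, 4, 8, 8)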
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
304
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class snake_case__ ( UpperCamelCase , UpperCamelCase , unittest.TestCase): a_ = StableDiffusionDiffEditPipeline a_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"} a_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"} a_ = frozenset( []) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess a_ = frozenset([]) def A ( self : Tuple ) -> Optional[Any]: torch.manual_seed(0 ) UpperCAmelCase_ : str = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_A , ) UpperCAmelCase_ : Optional[Any] = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_one=_A , ) UpperCAmelCase_ : Optional[int] = DDIMInverseScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_zero=_A , ) torch.manual_seed(0 ) UpperCAmelCase_ : List[str] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_28 , ) torch.manual_seed(0 ) UpperCAmelCase_ : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=5_12 , ) UpperCAmelCase_ : Union[str, Any] = CLIPTextModel(_A ) UpperCAmelCase_ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) UpperCAmelCase_ : Optional[int] = { '''unet''': unet, '''scheduler''': scheduler, '''inverse_scheduler''': inverse_scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def A ( self : str , _A : List[str] , _A : Any=0 ) -> str: UpperCAmelCase_ : Optional[Any] = floats_tensor((1, 16, 16) , rng=random.Random(_A ) ).to(_A ) UpperCAmelCase_ : Dict = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(_A ) ).to(_A ) if str(_A ).startswith('''mps''' ): UpperCAmelCase_ : Any = torch.manual_seed(_A ) else: UpperCAmelCase_ : Tuple = torch.Generator(device=_A ).manual_seed(_A ) UpperCAmelCase_ : str = { '''prompt''': '''a dog and a newt''', '''mask_image''': mask, '''image_latents''': latents, '''generator''': generator, 
'''num_inference_steps''': 2, '''inpaint_strength''': 1.0, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def A ( self : Tuple , _A : Optional[Any] , _A : Optional[Any]=0 ) -> List[str]: UpperCAmelCase_ : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A ) UpperCAmelCase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase_ : int = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' ) if str(_A ).startswith('''mps''' ): UpperCAmelCase_ : Dict = torch.manual_seed(_A ) else: UpperCAmelCase_ : Any = torch.Generator(device=_A ).manual_seed(_A ) UpperCAmelCase_ : Optional[Any] = { '''image''': image, '''source_prompt''': '''a cat and a frog''', '''target_prompt''': '''a dog and a newt''', '''generator''': generator, '''num_inference_steps''': 2, '''num_maps_per_mask''': 2, '''mask_encode_strength''': 1.0, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def A ( self : int , _A : Tuple , _A : List[str]=0 ) -> Any: UpperCAmelCase_ : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A ) UpperCAmelCase_ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase_ : Optional[int] = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' ) if str(_A ).startswith('''mps''' ): UpperCAmelCase_ : Optional[int] = torch.manual_seed(_A ) else: UpperCAmelCase_ : Tuple = torch.Generator(device=_A ).manual_seed(_A ) UpperCAmelCase_ : Optional[int] = { '''image''': image, '''prompt''': '''a cat and a frog''', '''generator''': generator, '''num_inference_steps''': 2, '''inpaint_strength''': 1.0, '''guidance_scale''': 6.0, '''decode_latents''': True, '''output_type''': '''numpy''', } return inputs def A ( self : List[str] ) -> Optional[Any]: if not hasattr(self.pipeline_class , '''_optional_components''' ): return UpperCAmelCase_ : str = self.get_dummy_components() UpperCAmelCase_ : Any = self.pipeline_class(**_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(_A , _A , _A ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) UpperCAmelCase_ : List[str] = self.get_dummy_inputs(_A ) UpperCAmelCase_ : str = pipe(**_A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(_A ) UpperCAmelCase_ : Any = self.pipeline_class.from_pretrained(_A ) pipe_loaded.to(_A ) pipe_loaded.set_progress_bar_config(disable=_A ) for optional_component in pipe._optional_components: self.assertTrue( getattr(_A , _A ) is None , F"`{optional_component}` did not stay set to None after loading." 
, ) UpperCAmelCase_ : Tuple = self.get_dummy_inputs(_A ) UpperCAmelCase_ : List[Any] = pipe_loaded(**_A )[0] UpperCAmelCase_ : Any = np.abs(output - output_loaded ).max() self.assertLess(_A , 1e-4 ) def A ( self : Tuple ) -> int: UpperCAmelCase_ : Optional[Any] = '''cpu''' UpperCAmelCase_ : Any = self.get_dummy_components() UpperCAmelCase_ : Optional[int] = self.pipeline_class(**_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase_ : Union[str, Any] = self.get_dummy_mask_inputs(_A ) UpperCAmelCase_ : int = pipe.generate_mask(**_A ) UpperCAmelCase_ : Tuple = mask[0, -3:, -3:] self.assertEqual(mask.shape , (1, 16, 16) ) UpperCAmelCase_ : List[Any] = np.array([0] * 9 ) UpperCAmelCase_ : Dict = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(_A , 1e-3 ) self.assertEqual(mask[0, -3, -4] , 0 ) def A ( self : str ) -> Optional[int]: UpperCAmelCase_ : Union[str, Any] = '''cpu''' UpperCAmelCase_ : str = self.get_dummy_components() UpperCAmelCase_ : str = self.pipeline_class(**_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase_ : Optional[Any] = self.get_dummy_inversion_inputs(_A ) UpperCAmelCase_ : Optional[Any] = pipe.invert(**_A ).images UpperCAmelCase_ : List[Any] = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) UpperCAmelCase_ : int = np.array( [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , ) UpperCAmelCase_ : List[str] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_A , 1e-3 ) def A ( self : Tuple ) -> Optional[Any]: super().test_inference_batch_single_identical(expected_max_diff=5e-3 ) def A ( self : str ) -> Tuple: UpperCAmelCase_ : Any = '''cpu''' UpperCAmelCase_ : Union[str, Any] = self.get_dummy_components() UpperCAmelCase_ : Any = {'''beta_start''': 0.00_085, '''beta_end''': 0.012, '''beta_schedule''': '''scaled_linear'''} UpperCAmelCase_ : Any = DPMSolverMultistepScheduler(**_A ) UpperCAmelCase_ : Optional[Any] = DPMSolverMultistepInverseScheduler(**_A ) UpperCAmelCase_ : Union[str, Any] = self.pipeline_class(**_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase_ : Union[str, Any] = self.get_dummy_inversion_inputs(_A ) UpperCAmelCase_ : Optional[Any] = pipe.invert(**_A ).images UpperCAmelCase_ : Tuple = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) UpperCAmelCase_ : List[Any] = np.array( [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , ) UpperCAmelCase_ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_A , 1e-3 ) @require_torch_gpu @slow class snake_case__ ( unittest.TestCase): def A ( self : Optional[Any] ) -> Optional[int]: super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def A ( cls : Dict ) -> List[Any]: UpperCAmelCase_ : Optional[int] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png''' ) UpperCAmelCase_ : int = raw_image.convert('''RGB''' ).resize((7_68, 7_68) ) UpperCAmelCase_ : Any = raw_image def A ( self : List[Any] ) -> List[str]: UpperCAmelCase_ : int = torch.manual_seed(0 ) UpperCAmelCase_ : str = StableDiffusionDiffEditPipeline.from_pretrained( '''stabilityai/stable-diffusion-2-1''' , safety_checker=_A , torch_dtype=torch.floataa ) UpperCAmelCase_ : List[str] = DDIMScheduler.from_config(pipe.scheduler.config ) UpperCAmelCase_ : List[str] = DDIMInverseScheduler.from_config(pipe.scheduler.config ) 
pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase_ : Optional[Any] = '''a bowl of fruit''' UpperCAmelCase_ : Tuple = '''a bowl of pears''' UpperCAmelCase_ : Optional[int] = pipe.generate_mask( image=self.raw_image , source_prompt=_A , target_prompt=_A , generator=_A , ) UpperCAmelCase_ : List[str] = pipe.invert( prompt=_A , image=self.raw_image , inpaint_strength=0.7 , generator=_A ).latents UpperCAmelCase_ : Any = pipe( prompt=_A , mask_image=_A , image_latents=_A , generator=_A , negative_prompt=_A , inpaint_strength=0.7 , output_type='''numpy''' , ).images[0] UpperCAmelCase_ : str = ( np.array( load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/diffedit/pears.png''' ).resize((7_68, 7_68) ) ) / 2_55 ) assert np.abs((expected_image - image).max() ) < 5e-1 def A ( self : Tuple ) -> List[str]: UpperCAmelCase_ : Dict = torch.manual_seed(0 ) UpperCAmelCase_ : Any = StableDiffusionDiffEditPipeline.from_pretrained( '''stabilityai/stable-diffusion-2-1''' , safety_checker=_A , torch_dtype=torch.floataa ) UpperCAmelCase_ : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) UpperCAmelCase_ : Union[str, Any] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase_ : Optional[Any] = '''a bowl of fruit''' UpperCAmelCase_ : Dict = '''a bowl of pears''' UpperCAmelCase_ : Union[str, Any] = pipe.generate_mask( image=self.raw_image , source_prompt=_A , target_prompt=_A , generator=_A , ) UpperCAmelCase_ : List[Any] = pipe.invert( prompt=_A , image=self.raw_image , inpaint_strength=0.7 , generator=_A , num_inference_steps=25 , ).latents UpperCAmelCase_ : Dict = pipe( prompt=_A , mask_image=_A , image_latents=_A , generator=_A , negative_prompt=_A , inpaint_strength=0.7 , num_inference_steps=25 , output_type='''numpy''' , ).images[0] UpperCAmelCase_ : Tuple = ( np.array( load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/diffedit/pears.png''' ).resize((7_68, 7_68) ) ) / 2_55 ) assert np.abs((expected_image - image).max() ) < 5e-1
304
1
"""simple docstring""" from __future__ import annotations def A_ ( _lowerCAmelCase : dict, _lowerCAmelCase : str ): """simple docstring""" _a , _a = set(_lowerCAmelCase ), [start] while stack: _a = stack.pop() explored.add(_lowerCAmelCase ) # Differences from BFS: # 1) pop last element instead of first one # 2) add adjacent elements to stack without exploring them for adj in reversed(graph[v] ): if adj not in explored: stack.append(_lowerCAmelCase ) return explored __snake_case = { '''A''': ['''B''', '''C''', '''D'''], '''B''': ['''A''', '''D''', '''E'''], '''C''': ['''A''', '''F'''], '''D''': ['''B''', '''D'''], '''E''': ['''B''', '''F'''], '''F''': ['''C''', '''E''', '''G'''], '''G''': ['''F'''], } if __name__ == "__main__": import doctest doctest.testmod() print(depth_first_search(G, '''A'''))
153
"""simple docstring""" import json import os import tempfile import transformers import datasets from utils import generate_example_dataset, get_duration __snake_case = 500000 __snake_case ,__snake_case = os.path.split(__file__) __snake_case = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json''')) @get_duration def A_ ( _lowerCAmelCase : datasets.Dataset, **_lowerCAmelCase : Dict ): """simple docstring""" _a = dataset.map(**_lowerCAmelCase ) @get_duration def A_ ( _lowerCAmelCase : datasets.Dataset, **_lowerCAmelCase : Dict ): """simple docstring""" _a = dataset.filter(**_lowerCAmelCase ) def A_ ( ): """simple docstring""" _a = {'''num examples''': SPEED_TEST_N_EXAMPLES} with tempfile.TemporaryDirectory() as tmp_dir: _a = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} ) _a = generate_example_dataset( os.path.join(_lowerCAmelCase, '''dataset.arrow''' ), _lowerCAmelCase, num_examples=_lowerCAmelCase ) _a = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''', use_fast=_lowerCAmelCase ) def tokenize(_lowerCAmelCase : Union[str, Any] ): return tokenizer(examples['''text'''] ) _a = map(_lowerCAmelCase ) _a = map(_lowerCAmelCase, batched=_lowerCAmelCase ) _a = map(_lowerCAmelCase, function=lambda _lowerCAmelCase : None, batched=_lowerCAmelCase ) with dataset.formatted_as(type='''numpy''' ): _a = map(_lowerCAmelCase, function=lambda _lowerCAmelCase : None, batched=_lowerCAmelCase ) with dataset.formatted_as(type='''pandas''' ): _a = map(_lowerCAmelCase, function=lambda _lowerCAmelCase : None, batched=_lowerCAmelCase ) with dataset.formatted_as(type='''torch''', columns='''numbers''' ): _a = map(_lowerCAmelCase, function=lambda _lowerCAmelCase : None, batched=_lowerCAmelCase ) with dataset.formatted_as(type='''tensorflow''', columns='''numbers''' ): _a = map(_lowerCAmelCase, function=lambda _lowerCAmelCase : None, batched=_lowerCAmelCase ) _a = map(_lowerCAmelCase, function=_lowerCAmelCase, batched=_lowerCAmelCase ) _a = filter(_lowerCAmelCase ) # Activate later when tokenizer support batched inputs # with dataset.formatted_as(type='numpy'): # times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True) with open(_lowerCAmelCase, '''wb''' ) as f: f.write(json.dumps(_lowerCAmelCase ).encode('''utf-8''' ) ) if __name__ == "__main__": # useful to run the profiler benchmark_map_filter()
153
1
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    '''simple docstring'''
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    '''simple docstring'''
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    '''simple docstring'''
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    '''simple docstring'''
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4):
    '''simple docstring'''
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
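# Illustrative expected values (a sketch, assuming the fixed `solution` above;
# these are the classic Project Euler 47 examples): 14 = 2 * 7 and 15 = 3 * 5
# are the first two consecutive integers with two distinct prime factors each,
# and 644, 645, 646 is the first such run of three.
#     >>> solution(2)
#     14
#     >>> solution(3)
#     644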
88
'''simple docstring''' import argparse import collections import numpy as np import torch from flax import traverse_util from tax import checkpoints from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def __lowerCamelCase ( __snake_case : List[str], __snake_case : Union[str, Any], __snake_case : Dict ) -> Dict: """simple docstring""" return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :] def __lowerCamelCase ( __snake_case : str, __snake_case : int, __snake_case : Dict, __snake_case : int="attention" ) -> str: """simple docstring""" A__ : Union[str, Any] =np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :] ) A__ : str =k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2] ) A__ : List[Any] =np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :] ) A__ : Optional[int] =o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2] ) A__ : Dict =np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :] ) A__ : Dict =q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2] ) A__ : Union[str, Any] =np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :] ) A__ : List[str] =v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2] ) return k, o, q, v def __lowerCamelCase ( __snake_case : Dict, __snake_case : Any, __snake_case : Tuple, __snake_case : Optional[Any]=False ) -> Any: """simple docstring""" if split_mlp_wi: A__ : Any =params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :] A__ : int =params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :] A__ : Optional[Any] =(wi_a, wi_a) else: A__ : Optional[int] =params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :] A__ : int =params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :] return wi, wo def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : str, __snake_case : Any, __snake_case : int ) -> List[Any]: """simple docstring""" return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i] def __lowerCamelCase ( __snake_case : dict, *, __snake_case : int, __snake_case : bool, __snake_case : bool = False ) -> Union[str, Any]: """simple docstring""" A__ : Optional[int] =traverse_util.flatten_dict(variables["""target"""] ) A__ : int ={"""/""".join(__snake_case ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi A__ : List[Any] ="""encoder/encoder/mlp/wi_0/kernel""" in old print("""Split MLP:""", __snake_case ) A__ : Optional[int] =collections.OrderedDict() # Shared embeddings. A__ : List[Any] =old["""token_embedder/embedding"""] # Encoder. for i in range(__snake_case ): # Block i, layer 0 (Self Attention). A__ : Optional[Any] =tax_layer_norm_lookup(__snake_case, __snake_case, """encoder""", """pre_attention_layer_norm""" ) A__ , A__ , A__ , A__ : Optional[int] =tax_attention_lookup(__snake_case, __snake_case, """encoder""", """attention""" ) A__ : List[str] =layer_norm A__ : Dict =k.T A__ : Optional[int] =o.T A__ : str =q.T A__ : Any =v.T # Block i, layer 1 (MLP). 
A__ : List[Any] =tax_layer_norm_lookup(__snake_case, __snake_case, """encoder""", """pre_mlp_layer_norm""" ) A__ , A__ : int =tax_mlp_lookup(__snake_case, __snake_case, """encoder""", __snake_case ) A__ : Optional[int] =layer_norm if split_mlp_wi: A__ : List[str] =wi[0].T A__ : List[str] =wi[1].T else: A__ : Optional[int] =wi.T A__ : Optional[Any] =wo.T if scalable_attention: # convert the rel_embedding of each layer A__ : int =tax_relpos_bias_lookup( __snake_case, __snake_case, """encoder""" ).T A__ : Optional[int] =old["""encoder/encoder_norm/scale"""] if not scalable_attention: A__ : List[Any] =tax_relpos_bias_lookup( __snake_case, 0, """encoder""" ).T A__ : Tuple =tax_relpos_bias_lookup( __snake_case, 0, """decoder""" ).T if not is_encoder_only: # Decoder. for i in range(__snake_case ): # Block i, layer 0 (Self Attention). A__ : List[str] =tax_layer_norm_lookup(__snake_case, __snake_case, """decoder""", """pre_self_attention_layer_norm""" ) A__ , A__ , A__ , A__ : List[str] =tax_attention_lookup(__snake_case, __snake_case, """decoder""", """self_attention""" ) A__ : str =layer_norm A__ : List[str] =k.T A__ : int =o.T A__ : Tuple =q.T A__ : Optional[Any] =v.T # Block i, layer 1 (Cross Attention). A__ : int =tax_layer_norm_lookup(__snake_case, __snake_case, """decoder""", """pre_cross_attention_layer_norm""" ) A__ , A__ , A__ , A__ : Optional[Any] =tax_attention_lookup(__snake_case, __snake_case, """decoder""", """encoder_decoder_attention""" ) A__ : str =layer_norm A__ : Union[str, Any] =k.T A__ : str =o.T A__ : Any =q.T A__ : str =v.T # Block i, layer 2 (MLP). A__ : str =tax_layer_norm_lookup(__snake_case, __snake_case, """decoder""", """pre_mlp_layer_norm""" ) A__ , A__ : Optional[int] =tax_mlp_lookup(__snake_case, __snake_case, """decoder""", __snake_case ) A__ : Dict =layer_norm if split_mlp_wi: A__ : List[Any] =wi[0].T A__ : Union[str, Any] =wi[1].T else: A__ : Optional[int] =wi.T A__ : str =wo.T if scalable_attention: # convert the rel_embedding of each layer A__ : str =tax_relpos_bias_lookup(__snake_case, __snake_case, """decoder""" ).T A__ : str =old["""decoder/decoder_norm/scale"""] # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: A__ : Tuple =old["""decoder/logits_dense/kernel"""].T return new def __lowerCamelCase ( __snake_case : Dict, __snake_case : bool ) -> Optional[Any]: """simple docstring""" A__ : Any =collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: A__ : Union[str, Any] =state_dict["""shared.weight"""] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: A__ : List[str] =state_dict["""shared.weight"""] if "lm_head.weight" not in state_dict: # For old 1.0 models. 
print("""Using shared word embeddings as lm_head.""" ) A__ : Optional[Any] =state_dict["""shared.weight"""] return state_dict def __lowerCamelCase ( __snake_case : str, __snake_case : str, __snake_case : Optional[Any], __snake_case : int, __snake_case : Optional[int] ) -> Optional[int]: """simple docstring""" A__ : str =checkpoints.load_tax_checkpoint(__snake_case ) A__ : Optional[Any] =convert_tax_to_pytorch( __snake_case, num_layers=config.num_layers, is_encoder_only=__snake_case, scalable_attention=__snake_case ) A__ : str =make_state_dict(__snake_case, __snake_case ) model.load_state_dict(__snake_case, strict=__snake_case ) def __lowerCamelCase ( __snake_case : Optional[int], __snake_case : Dict, __snake_case : Optional[int], __snake_case : bool = False, __snake_case : bool = False, ) -> Dict: """simple docstring""" A__ : Tuple =MTaConfig.from_json_file(__snake_case ) print(f"Building PyTorch model from configuration: {config}" ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: A__ : List[Any] =UMTaEncoderModel(__snake_case ) else: A__ : int =UMTaForConditionalGeneration(__snake_case ) # Load weights from tf checkpoint load_tax_weights_in_ta(__snake_case, __snake_case, __snake_case, __snake_case, __snake_case ) # Save pytorch-model print(f"Save PyTorch model to {pytorch_dump_path}" ) model.save_pretrained(__snake_case ) # Verify that we can load the checkpoint. model.from_pretrained(__snake_case ) print("""Done""" ) if __name__ == "__main__": __snake_case : str = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.') # Required parameters parser.add_argument( '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False ) parser.add_argument( '--scalable_attention', action='store_true', help='Whether the model uses scaled attention (umt5 model)', default=False, ) __snake_case : Optional[Any] = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only, args.scalable_attention, )
134
0
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : str=7 , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : Optional[Any]=30 , UpperCAmelCase__ : Tuple=400 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : List[str]=[0.5, 0.5, 0.5] , UpperCAmelCase__ : Union[str, Any]=[0.5, 0.5, 0.5] , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : List[str]=1 / 255 , UpperCAmelCase__ : str=True , ) ->List[Any]: '''simple docstring''' A__ = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333} A__ = parent A__ = batch_size A__ = num_channels A__ = min_resolution A__ = max_resolution A__ = do_resize A__ = size A__ = do_normalize A__ = image_mean A__ = image_std A__ = do_rescale A__ = rescale_factor A__ = do_pad def SCREAMING_SNAKE_CASE ( self : str) ->Dict: '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple=False) ->Dict: '''simple docstring''' if not batched: A__ = image_inputs[0] if isinstance(UpperCAmelCase__ , Image.Image): A__ , A__ = image.size else: A__ , A__ = image.shape[1], image.shape[2] if w < h: A__ = int(self.size['''shortest_edge'''] * h / w) A__ = self.size['''shortest_edge'''] elif w > h: A__ = self.size['''shortest_edge'''] A__ = int(self.size['''shortest_edge'''] * w / h) else: A__ = self.size['''shortest_edge'''] A__ = self.size['''shortest_edge'''] else: A__ = [] for image in image_inputs: A__ , A__ = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) A__ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__: item[0])[0] A__ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__: item[1])[1] return expected_height, expected_width @require_torch @require_vision class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = DeformableDetrImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[Any]: '''simple docstring''' A__ = DeformableDetrImageProcessingTester(self) @property def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[int]: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(UpperCAmelCase__ , '''image_mean''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''image_std''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_normalize''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_rescale''')) self.assertTrue(hasattr(UpperCAmelCase__ 
, '''do_pad''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''size''')) def SCREAMING_SNAKE_CASE ( self : str) ->Any: '''simple docstring''' A__ = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333}) self.assertEqual(image_processor.do_pad , UpperCAmelCase__) A__ = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCAmelCase__) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84}) self.assertEqual(image_processor.do_pad , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[str]: '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self : Any) ->List[str]: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) # create random PIL images A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , Image.Image) # Test not batched input A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__) A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE ( self : str) ->Optional[int]: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , np.ndarray) # Test not batched input A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE ( self : Any) ->Tuple: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , torch.Tensor) # Test not batched input A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A__ = image_processing(UpperCAmelCase__ , 
return_tensors='''pt''').pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]: '''simple docstring''' A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''') with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''') as f: A__ = json.loads(f.read()) A__ = {'''image_id''': 39_769, '''annotations''': target} # encode them A__ = DeformableDetrImageProcessor() A__ = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , return_tensors='''pt''') # verify pixel values A__ = torch.Size([1, 3, 800, 1_066]) self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase__) A__ = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase__ , atol=1e-4)) # verify area A__ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase__)) # verify boxes A__ = torch.Size([6, 4]) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase__) A__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase__ , atol=1e-3)) # verify image_id A__ = torch.tensor([39_769]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase__)) # verify is_crowd A__ = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase__)) # verify class_labels A__ = torch.tensor([75, 75, 63, 65, 17, 17]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase__)) # verify orig_size A__ = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase__)) # verify size A__ = torch.tensor([800, 1_066]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase__)) @slow def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Optional[Any]: '''simple docstring''' A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''') with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''') as f: A__ = json.loads(f.read()) A__ = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target} A__ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''') # encode them A__ = DeformableDetrImageProcessor(format='''coco_panoptic''') A__ = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , masks_path=UpperCAmelCase__ , return_tensors='''pt''') # verify pixel values A__ = torch.Size([1, 3, 800, 1_066]) self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase__) A__ = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase__ , atol=1e-4)) # verify area A__ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase__)) # verify boxes A__ = torch.Size([6, 4]) 
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase__) A__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase__ , atol=1e-3)) # verify image_id A__ = torch.tensor([39_769]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase__)) # verify is_crowd A__ = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase__)) # verify class_labels A__ = torch.tensor([17, 17, 63, 75, 75, 93]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase__)) # verify masks A__ = 822_873 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCAmelCase__) # verify orig_size A__ = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase__)) # verify size A__ = torch.tensor([800, 1_066]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase__))
231
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowerCamelCase : Union[str, Any] = """▁""" _lowerCamelCase : Optional[Any] = {"""vocab_file""": """spiece.model"""} _lowerCamelCase : str = { """vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""} } _lowerCamelCase : List[str] = { """google/pegasus-xsum""": 512, } _lowerCamelCase : int = logging.get_logger(__name__) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = VOCAB_FILES_NAMES UpperCAmelCase__ = VOCAB_FILES_NAMES UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ = ['''input_ids''', '''attention_mask'''] def __init__( self : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str="<pad>" , UpperCAmelCase__ : Optional[Any]="</s>" , UpperCAmelCase__ : Any="<unk>" , UpperCAmelCase__ : Union[str, Any]="<mask_2>" , UpperCAmelCase__ : List[str]="<mask_1>" , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Optional[Any]=103 , UpperCAmelCase__ : Optional[Dict[str, Any]] = None , **UpperCAmelCase__ : Dict , ) ->None: '''simple docstring''' A__ = offset if additional_special_tokens is not None: if not isinstance(UpperCAmelCase__ , UpperCAmelCase__): raise TypeError( f"""additional_special_tokens should be of type {type(UpperCAmelCase__)}, but is""" f""" {type(UpperCAmelCase__)}""") A__ = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f"""<unk_{i}>""" for i in range(len(UpperCAmelCase__) , self.offset - 1) ] if len(set(UpperCAmelCase__)) != len(UpperCAmelCase__): raise ValueError( '''Please make sure that the provided additional_special_tokens do not contain an incorrectly''' f""" shifted list of <unk_x> tokens. 
Found {additional_special_tokens_extended}.""") A__ = additional_special_tokens_extended else: A__ = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset)] A__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , mask_token_sent=UpperCAmelCase__ , offset=UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , ) A__ = mask_token_sent A__ = vocab_file A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(UpperCAmelCase__) # add special tokens to encoder dict A__ = { 0: self.pad_token, 1: self.eos_token, } if self.mask_token_sent is not None: self.encoder.update( { 2: self.mask_token_sent, 3: self.mask_token, }) if self.offset > 0: # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102 # mask_token_sent is already added to list -> so start at 1 self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1)}) A__ = {v: k for k, v in self.encoder.items()} @property def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->int: '''simple docstring''' return len(self.sp_model) + self.offset def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict[str, int]: '''simple docstring''' A__ = {self.convert_ids_to_tokens(UpperCAmelCase__): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self : Any) ->Union[str, Any]: '''simple docstring''' A__ = self.__dict__.copy() A__ = None return state def __setstate__( self : int , UpperCAmelCase__ : Optional[int]) ->Optional[int]: '''simple docstring''' A__ = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs'''): A__ = {} A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : str) ->List[str]: '''simple docstring''' return self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : str) ->int: '''simple docstring''' if token in self.decoder: return self.decoder[token] elif token in self.added_tokens_decoder: return self.added_tokens_decoder[token] A__ = self.sp_model.piece_to_id(UpperCAmelCase__) return sp_id + self.offset def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : int) ->str: '''simple docstring''' if index in self.encoder: return self.encoder[index] elif index in self.added_tokens_encoder: return self.added_tokens_encoder[index] else: A__ = self.sp_model.IdToPiece(index - self.offset) return token def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : int) ->Optional[int]: '''simple docstring''' A__ = [] A__ = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(UpperCAmelCase__) + token A__ = [] else: current_sub_tokens.append(UpperCAmelCase__) out_string += self.sp_model.decode(UpperCAmelCase__) return out_string.strip() def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Optional[int]=False) ->Union[str, Any]: '''simple docstring''' return 1 def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : Union[str, Any]) ->Optional[Any]: '''simple docstring''' A__ = set(self.all_special_ids) # 
call it once instead of inside list comp all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special return [1 if x in all_special_ids else 0 for x in seq] def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : List , UpperCAmelCase__ : Optional[List] = None , UpperCAmelCase__ : bool = False) ->List[int]: '''simple docstring''' if already_has_special_tokens: return self._special_token_mask(UpperCAmelCase__) elif token_ids_a is None: return self._special_token_mask(UpperCAmelCase__) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a) + [1] def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str]=None) ->List[int]: '''simple docstring''' if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None) ->Tuple[str]: '''simple docstring''' if not os.path.isdir(UpperCAmelCase__): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""") return A__ = os.path.join( UpperCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']) if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCAmelCase__) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , UpperCAmelCase__) elif not os.path.isfile(self.vocab_file): with open(UpperCAmelCase__ , '''wb''') as fi: A__ = self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase__) return (out_vocab_file,)
231
1
import copy
import random

from transformers import CLIPTokenizer


class MultiTokenCLIPTokenizer(CLIPTokenizer):
    '''simple docstring'''

    def __init__(self, *args, **kwargs):
        """simple docstring"""
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        """simple docstring"""
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
                ''' `placeholder_token` that is not already in the tokenizer.'''
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        """simple docstring"""
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"""_{i}"""
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"""The tokenizer already has placeholder token {token} that can get confused with"""
                    f""" {placeholder_token}; keep placeholder tokens independent."""
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        """simple docstring"""
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(
                    self.replace_placeholder_tokens_in_text(
                        text[i], vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
                    )
                )
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, ''' '''.join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        """simple docstring"""
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        """simple docstring"""
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
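# Minimal usage sketch (illustrative only; the CLIP checkpoint name and the
# "<cat-toy>" placeholder are assumptions, not part of the original file):
#     tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
#     tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#     ids = tokenizer("a photo of <cat-toy>", vector_shuffle=True)
# Before encoding, "<cat-toy>" expands to "<cat-toy>_0 ... <cat-toy>_3", so a
# single placeholder can carry several learned embedding vectors.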
188
from __future__ import annotations


def shear_stress(
    stress: float,
    tangential_force: float,
    area: float,
) -> tuple[str, float]:
    '''simple docstring'''
    if (stress, tangential_force, area).count(0) != 1:
        raise ValueError('''You cannot supply more or less than 2 values''')
    elif stress < 0:
        raise ValueError('''Stress cannot be negative''')
    elif tangential_force < 0:
        raise ValueError('''Tangential Force cannot be negative''')
    elif area < 0:
        raise ValueError('''Area cannot be negative''')
    elif stress == 0:
        return (
            "stress",
            tangential_force / area,
        )
    elif tangential_force == 0:
        return (
            "tangential_force",
            stress * area,
        )
    else:
        return (
            "area",
            tangential_force / stress,
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
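# Illustrative calls (a sketch, assuming the fixed signature above); exactly
# one argument must be 0, and the function solves for that quantity:
#     >>> shear_stress(stress=25, tangential_force=100, area=0)
#     ('area', 4.0)
#     >>> shear_stress(stress=0, tangential_force=1600, area=200)
#     ('stress', 8.0)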
188
1
import argparse import os from pathlib import Path import torch from bark.generation import _load_model as _bark_load_model from huggingface_hub import hf_hub_download from transformers import EncodecConfig, EncodecModel, set_seed from transformers.models.bark.configuration_bark import ( BarkCoarseConfig, BarkConfig, BarkFineConfig, BarkSemanticConfig, ) from transformers.models.bark.generation_configuration_bark import ( BarkCoarseGenerationConfig, BarkFineGenerationConfig, BarkGenerationConfig, BarkSemanticGenerationConfig, ) from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase_ : Union[str, Any] = logging.get_logger(__name__) set_seed(7_70) lowerCamelCase_ : str = { """c_attn""": """att_proj""", """c_proj""": """out_proj""", """c_fc""": """in_proj""", """transformer.""": """""", """h.""": """layers.""", """ln_1""": """layernorm_1""", """ln_2""": """layernorm_2""", """ln_f""": """layernorm_final""", """wpe""": """position_embeds_layer""", """wte""": """input_embeds_layer""", } lowerCamelCase_ : Any = { """text_small""": { """repo_id""": """suno/bark""", """file_name""": """text.pt""", }, """coarse_small""": { """repo_id""": """suno/bark""", """file_name""": """coarse.pt""", }, """fine_small""": { """repo_id""": """suno/bark""", """file_name""": """fine.pt""", }, """text""": { """repo_id""": """suno/bark""", """file_name""": """text_2.pt""", }, """coarse""": { """repo_id""": """suno/bark""", """file_name""": """coarse_2.pt""", }, """fine""": { """repo_id""": """suno/bark""", """file_name""": """fine_2.pt""", }, } lowerCamelCase_ : str = os.path.dirname(os.path.abspath(__file__)) lowerCamelCase_ : Any = os.path.join(os.path.expanduser("""~"""), """.cache""") lowerCamelCase_ : Dict = os.path.join(os.getenv("""XDG_CACHE_HOME""", default_cache_dir), """suno""", """bark_v0""") def A__ ( lowerCamelCase , lowerCamelCase=False ) -> int: UpperCamelCase_: Union[str, Any] = model_type if use_small: key += "_small" return os.path.join(lowerCamelCase , REMOTE_MODEL_PATHS[key]["""file_name"""] ) def A__ ( lowerCamelCase , lowerCamelCase ) -> Tuple: os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase ) hf_hub_download(repo_id=lowerCamelCase , filename=lowerCamelCase , local_dir=lowerCamelCase ) def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase=False , lowerCamelCase="text" ) -> Optional[int]: if model_type == "text": UpperCamelCase_: str = BarkSemanticModel UpperCamelCase_: Dict = BarkSemanticConfig UpperCamelCase_: int = BarkSemanticGenerationConfig elif model_type == "coarse": UpperCamelCase_: str = BarkCoarseModel UpperCamelCase_: int = BarkCoarseConfig UpperCamelCase_: Any = BarkCoarseGenerationConfig elif model_type == "fine": UpperCamelCase_: Optional[Any] = BarkFineModel UpperCamelCase_: int = BarkFineConfig UpperCamelCase_: Dict = BarkFineGenerationConfig else: raise NotImplementedError() UpperCamelCase_: str = F'''{model_type}_small''' if use_small else model_type UpperCamelCase_: List[Any] = REMOTE_MODEL_PATHS[model_key] if not os.path.exists(lowerCamelCase ): logger.info(F'''{model_type} model not found, downloading into `{CACHE_DIR}`.''' ) _download(model_info["""repo_id"""] , model_info["""file_name"""] ) UpperCamelCase_: int = torch.load(lowerCamelCase , map_location=lowerCamelCase ) # this is a hack UpperCamelCase_: Tuple = checkpoint["""model_args"""] if "input_vocab_size" not in model_args: UpperCamelCase_: int = 
model_args["""vocab_size"""] UpperCamelCase_: Optional[int] = model_args["""vocab_size"""] del model_args["vocab_size"] # convert Bark model arguments to HF Bark model arguments UpperCamelCase_: Tuple = model_args.pop("""n_head""" ) UpperCamelCase_: Dict = model_args.pop("""n_embd""" ) UpperCamelCase_: List[str] = model_args.pop("""n_layer""" ) UpperCamelCase_: Optional[Any] = ConfigClass(**checkpoint["""model_args"""] ) UpperCamelCase_: Optional[Any] = ModelClass(config=lowerCamelCase ) UpperCamelCase_: List[Any] = GenerationConfigClass() UpperCamelCase_: Optional[Any] = model_generation_config UpperCamelCase_: Optional[int] = checkpoint["""model"""] # fixup checkpoint UpperCamelCase_: Dict = """_orig_mod.""" for k, v in list(state_dict.items() ): if k.startswith(lowerCamelCase ): # replace part of the key with corresponding layer name in HF implementation UpperCamelCase_: Optional[int] = k[len(lowerCamelCase ) :] for old_layer_name in new_layer_name_dict: UpperCamelCase_: Dict = new_k.replace(lowerCamelCase , new_layer_name_dict[old_layer_name] ) UpperCamelCase_: List[str] = state_dict.pop(lowerCamelCase ) UpperCamelCase_: Optional[int] = set(state_dict.keys() ) - set(model.state_dict().keys() ) UpperCamelCase_: Dict = {k for k in extra_keys if not k.endswith(""".attn.bias""" )} UpperCamelCase_: Optional[Any] = set(model.state_dict().keys() ) - set(state_dict.keys() ) UpperCamelCase_: Union[str, Any] = {k for k in missing_keys if not k.endswith(""".attn.bias""" )} if len(lowerCamelCase ) != 0: raise ValueError(F'''extra keys found: {extra_keys}''' ) if len(lowerCamelCase ) != 0: raise ValueError(F'''missing keys: {missing_keys}''' ) model.load_state_dict(lowerCamelCase , strict=lowerCamelCase ) UpperCamelCase_: str = model.num_parameters(exclude_embeddings=lowerCamelCase ) UpperCamelCase_: int = checkpoint["""best_val_loss"""].item() logger.info(F'''model loaded: {round(n_params/1E6 , 1 )}M params, {round(lowerCamelCase , 3 )} loss''' ) model.eval() model.to(lowerCamelCase ) del checkpoint, state_dict return model def A__ ( lowerCamelCase , lowerCamelCase=False , lowerCamelCase="text" ) -> Any: if model_type not in ("text", "coarse", "fine"): raise NotImplementedError() UpperCamelCase_: Union[str, Any] = """cpu""" # do conversion on cpu UpperCamelCase_: int = _get_ckpt_path(lowerCamelCase , use_small=lowerCamelCase ) UpperCamelCase_: Dict = _load_model(lowerCamelCase , lowerCamelCase , model_type=lowerCamelCase , use_small=lowerCamelCase ) # load bark initial model UpperCamelCase_: List[Any] = _bark_load_model(lowerCamelCase , """cpu""" , model_type=lowerCamelCase , use_small=lowerCamelCase ) if model_type == "text": UpperCamelCase_: Tuple = bark_model["""model"""] if model.num_parameters(exclude_embeddings=lowerCamelCase ) != bark_model.get_num_params(): raise ValueError("""initial and new models don't have the same number of parameters""" ) # check if same output as the bark model UpperCamelCase_: Optional[Any] = 5 UpperCamelCase_: List[str] = 10 if model_type in ["text", "coarse"]: UpperCamelCase_: int = torch.randint(2_56 , (batch_size, sequence_length) , dtype=torch.int ) UpperCamelCase_: Tuple = bark_model(lowerCamelCase )[0] UpperCamelCase_: Optional[Any] = model(lowerCamelCase ) # take last logits UpperCamelCase_: Union[str, Any] = output_new_model_total.logits[:, [-1], :] else: UpperCamelCase_: Tuple = 3 UpperCamelCase_: List[Any] = 8 UpperCamelCase_: List[str] = torch.randint(2_56 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int ) UpperCamelCase_: int = 
model(lowerCamelCase , lowerCamelCase ) UpperCamelCase_: Any = bark_model(lowerCamelCase , lowerCamelCase ) UpperCamelCase_: Optional[int] = output_new_model_total.logits # output difference should come from the difference of self-attention implementation design if output_new_model.shape != output_old_model.shape: raise ValueError("""initial and new outputs don't have the same shape""" ) if (output_new_model - output_old_model).abs().max().item() > 1E-3: raise ValueError("""initial and new outputs are not equal""" ) Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase ) model.save_pretrained(lowerCamelCase ) def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) -> str: UpperCamelCase_: List[str] = os.path.join(lowerCamelCase , lowerCamelCase ) UpperCamelCase_: Optional[int] = BarkSemanticConfig.from_pretrained(os.path.join(lowerCamelCase , """config.json""" ) ) UpperCamelCase_: List[Any] = BarkCoarseConfig.from_pretrained(os.path.join(lowerCamelCase , """config.json""" ) ) UpperCamelCase_: Optional[int] = BarkFineConfig.from_pretrained(os.path.join(lowerCamelCase , """config.json""" ) ) UpperCamelCase_: Any = EncodecConfig.from_pretrained("""facebook/encodec_24khz""" ) UpperCamelCase_: Optional[Any] = BarkSemanticModel.from_pretrained(lowerCamelCase ) UpperCamelCase_: Tuple = BarkCoarseModel.from_pretrained(lowerCamelCase ) UpperCamelCase_: List[str] = BarkFineModel.from_pretrained(lowerCamelCase ) UpperCamelCase_: Tuple = EncodecModel.from_pretrained("""facebook/encodec_24khz""" ) UpperCamelCase_: int = BarkConfig.from_sub_model_configs( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) UpperCamelCase_: Optional[int] = BarkGenerationConfig.from_sub_model_configs( semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config ) UpperCamelCase_: Optional[Any] = BarkModel(lowerCamelCase ) UpperCamelCase_: int = semantic UpperCamelCase_: Tuple = coarseAcoustic UpperCamelCase_: Optional[int] = fineAcoustic UpperCamelCase_: Any = codec UpperCamelCase_: Dict = bark_generation_config Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase ) bark.save_pretrained(lowerCamelCase , repo_id=lowerCamelCase , push_to_hub=lowerCamelCase ) if __name__ == "__main__": lowerCamelCase_ : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument("""model_type""", type=str, help="""text, coarse or fine.""") parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--is_small""", action="""store_true""", help="""convert the small version instead of the large.""") lowerCamelCase_ : Dict = parser.parse_args() load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
223
import unittest

from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = """永和服装饰品有限公司,今天天气非常好"""
        output_text = """永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"""
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_training_new_tokenizer(self):
        pass

    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
223
1
"""simple docstring""" def __UpperCAmelCase ( __lowerCamelCase ) -> int: if a < 0: raise ValueError('''Input value must be a positive integer''' ) elif isinstance(__lowerCamelCase , __lowerCamelCase ): raise TypeError('''Input value must be a \'int\' type''' ) return bin(__lowerCamelCase ).count('''1''' ) if __name__ == "__main__": import doctest doctest.testmod()
16
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import torch from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available @dataclass class __SCREAMING_SNAKE_CASE ( A__ ): A : Union[List[np.ndarray], torch.FloatTensor] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401 from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
337
0
"""simple docstring""" import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class __lowercase : """simple docstring""" def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=1_3 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=2 , lowerCAmelCase__=9_9 , lowerCAmelCase__=0 , lowerCAmelCase__=3_2 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_1_2 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=2 , lowerCAmelCase__=4 , lowerCAmelCase__="last" , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=0 , ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = parent SCREAMING_SNAKE_CASE_ : Any = batch_size SCREAMING_SNAKE_CASE_ : Dict = seq_length SCREAMING_SNAKE_CASE_ : Any = is_training SCREAMING_SNAKE_CASE_ : Dict = use_input_lengths SCREAMING_SNAKE_CASE_ : Optional[Any] = use_token_type_ids SCREAMING_SNAKE_CASE_ : List[str] = use_labels SCREAMING_SNAKE_CASE_ : Any = gelu_activation SCREAMING_SNAKE_CASE_ : Union[str, Any] = sinusoidal_embeddings SCREAMING_SNAKE_CASE_ : List[Any] = causal SCREAMING_SNAKE_CASE_ : Optional[Any] = asm SCREAMING_SNAKE_CASE_ : Any = n_langs SCREAMING_SNAKE_CASE_ : List[Any] = vocab_size SCREAMING_SNAKE_CASE_ : Dict = n_special SCREAMING_SNAKE_CASE_ : Tuple = hidden_size SCREAMING_SNAKE_CASE_ : Dict = num_hidden_layers SCREAMING_SNAKE_CASE_ : Optional[int] = num_attention_heads SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE_ : List[str] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ : Optional[int] = max_position_embeddings SCREAMING_SNAKE_CASE_ : int = type_sequence_label_size SCREAMING_SNAKE_CASE_ : Union[str, Any] = initializer_range SCREAMING_SNAKE_CASE_ : Optional[int] = num_labels SCREAMING_SNAKE_CASE_ : str = num_choices SCREAMING_SNAKE_CASE_ : Union[str, Any] = summary_type SCREAMING_SNAKE_CASE_ : Union[str, Any] = use_proj SCREAMING_SNAKE_CASE_ : str = scope SCREAMING_SNAKE_CASE_ : Union[str, Any] = bos_token_id def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE_ : Any = None if self.use_input_lengths: SCREAMING_SNAKE_CASE_ : Union[str, Any] = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length SCREAMING_SNAKE_CASE_ : Any = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) SCREAMING_SNAKE_CASE_ : Optional[Any] = None SCREAMING_SNAKE_CASE_ : 
int = None SCREAMING_SNAKE_CASE_ : Any = None if self.use_labels: SCREAMING_SNAKE_CASE_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([self.batch_size] , 2 ).float() SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size] , self.num_choices ) SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def UpperCamelCase__ ( self ): """simple docstring""" return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = XLMModel(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() SCREAMING_SNAKE_CASE_ : Optional[int] = model(lowerCAmelCase__ , lengths=lowerCAmelCase__ , langs=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Optional[Any] = model(lowerCAmelCase__ , langs=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : str = model(lowerCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = XLMWithLMHeadModel(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() SCREAMING_SNAKE_CASE_ : Optional[Any] = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = XLMForQuestionAnsweringSimple(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() SCREAMING_SNAKE_CASE_ : Tuple = model(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : List[str] = model(lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : List[str] = outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , 
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = XLMForQuestionAnswering(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() SCREAMING_SNAKE_CASE_ : int = model(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : str = model( lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__ , cls_index=lowerCAmelCase__ , is_impossible=lowerCAmelCase__ , p_mask=lowerCAmelCase__ , ) SCREAMING_SNAKE_CASE_ : Dict = model( lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__ , cls_index=lowerCAmelCase__ , is_impossible=lowerCAmelCase__ , ) (SCREAMING_SNAKE_CASE_ ) : Tuple = result_with_labels.to_tuple() SCREAMING_SNAKE_CASE_ : Tuple = model(lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__ ) (SCREAMING_SNAKE_CASE_ ) : List[Any] = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = XLMForSequenceClassification(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() SCREAMING_SNAKE_CASE_ : List[Any] = model(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Optional[int] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_labels SCREAMING_SNAKE_CASE_ : Optional[int] = XLMForTokenClassification(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() SCREAMING_SNAKE_CASE_ : Tuple = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = self.num_choices SCREAMING_SNAKE_CASE_ : Tuple = XLMForMultipleChoice(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() SCREAMING_SNAKE_CASE_ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE_ : str = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE_ : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE_ : List[Any] = model( 
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = self.prepare_config_and_inputs() ( SCREAMING_SNAKE_CASE_ ) : int = config_and_inputs SCREAMING_SNAKE_CASE_ : int = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths} return config, inputs_dict @require_torch class __lowercase (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): """simple docstring""" _UpperCAmelCase = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) _UpperCAmelCase = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable _UpperCAmelCase = ( { """feature-extraction""": XLMModel, """fill-mask""": XLMWithLMHeadModel, """question-answering""": XLMForQuestionAnsweringSimple, """text-classification""": XLMForSequenceClassification, """text-generation""": XLMWithLMHeadModel, """token-classification""": XLMForTokenClassification, """zero-shot""": XLMForSequenceClassification, } if is_torch_available() else {} ) def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = super()._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Optional[int] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ ) return inputs_dict def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = XLMModelTester(self ) SCREAMING_SNAKE_CASE_ : Optional[int] = ConfigTester(self , config_class=lowerCAmelCase__ , emb_dim=3_7 ) def UpperCamelCase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*lowerCAmelCase__ ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase__ ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase__ ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase__ ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase__ ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase__ ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase__ ) def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False , lowerCAmelCase__=1 ): """simple docstring""" self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertListEqual( [isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase__ ) ) self.assertEqual(len(lowerCAmelCase__ ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(lowerCAmelCase__ ): # adds PAD dummy token SCREAMING_SNAKE_CASE_ : Dict = min_length + idx + 1 SCREAMING_SNAKE_CASE_ : int = min_length + idx + 1 SCREAMING_SNAKE_CASE_ : int = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase__ ) ) def UpperCamelCase__ ( self , lowerCAmelCase__ 
, lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False , lowerCAmelCase__=1 ): """simple docstring""" self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertListEqual( [isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase__ ) , ) self.assertEqual(len(lowerCAmelCase__ ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(lowerCAmelCase__ ): # adds PAD dummy token SCREAMING_SNAKE_CASE_ : str = min_length + idx + 1 SCREAMING_SNAKE_CASE_ : Dict = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase__ ) , ) pass @slow def UpperCamelCase__ ( self ): """simple docstring""" for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_ : str = XLMModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) @require_torch class __lowercase (unittest.TestCase ): """simple docstring""" @slow def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' ) model.to(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : str = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=lowerCAmelCase__ ) # the president SCREAMING_SNAKE_CASE_ : Tuple = [ 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference SCREAMING_SNAKE_CASE_ : int = model.generate(lowerCAmelCase__ , do_sample=lowerCAmelCase__ ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase__ )
360
import argparse
import os

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"


def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
    "asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    "audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    "language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    "image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    "masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    "multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    "object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    "question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    "semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    "sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    "summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    "translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    "document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    "monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}

# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
    "summarization.md": ("nllb",),
    "translation.md": ("nllb",),
}


def get_model_list_for_task(task_guide):
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
162
0
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import PIL

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)


@dataclass
class IFPipelineOutput(BaseOutput):
    images: List[PIL.Image.Image]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_if import IFPipeline
    from .pipeline_if_img2img import IFImg2ImgPipeline
    from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline
    from .pipeline_if_inpainting import IFInpaintingPipeline
    from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
    from .pipeline_if_superresolution import IFSuperResolutionPipeline
    from .safety_checker import IFSafetyChecker
    from .watermark import IFWatermarker
118
import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class lowerCamelCase (SCREAMING_SNAKE_CASE__ ): """simple docstring""" lowerCamelCase__ = '''char''' lowerCamelCase__ = '''bpe''' lowerCamelCase__ = '''wp''' A : Tuple = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class lowerCamelCase (SCREAMING_SNAKE_CASE__ ): """simple docstring""" lowerCamelCase__ = ['''image_processor''', '''char_tokenizer'''] lowerCamelCase__ = '''ViTImageProcessor''' lowerCamelCase__ = '''MgpstrTokenizer''' def __init__( self : Optional[Any] , __magic_name__ : Tuple=None , __magic_name__ : int=None , **__magic_name__ : Optional[Any] ) -> str: SCREAMING_SNAKE_CASE_ = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , __magic_name__ , ) SCREAMING_SNAKE_CASE_ = kwargs.pop("feature_extractor" ) SCREAMING_SNAKE_CASE_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) SCREAMING_SNAKE_CASE_ = tokenizer SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained("gpt2" ) SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained("bert-base-uncased" ) super().__init__(__magic_name__ , __magic_name__ ) def __call__( self : Dict , __magic_name__ : Union[str, Any]=None , __magic_name__ : Union[str, Any]=None , __magic_name__ : Dict=None , **__magic_name__ : Tuple ) -> int: if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process." 
) if images is not None: SCREAMING_SNAKE_CASE_ = self.image_processor(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ ) if text is not None: SCREAMING_SNAKE_CASE_ = self.char_tokenizer(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ ) if text is None: return inputs elif images is None: return encodings else: SCREAMING_SNAKE_CASE_ = encodings["input_ids"] return inputs def __A ( self : Tuple , __magic_name__ : int ) -> Any: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = sequences SCREAMING_SNAKE_CASE_ = char_preds.size(0 ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._decode_helper(__magic_name__ , "char" ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._decode_helper(__magic_name__ , "bpe" ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._decode_helper(__magic_name__ , "wp" ) SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = [] for i in range(__magic_name__ ): SCREAMING_SNAKE_CASE_ = [char_scores[i], bpe_scores[i], wp_scores[i]] SCREAMING_SNAKE_CASE_ = [char_strs[i], bpe_strs[i], wp_strs[i]] SCREAMING_SNAKE_CASE_ = scores.index(max(__magic_name__ ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = final_strs SCREAMING_SNAKE_CASE_ = final_scores SCREAMING_SNAKE_CASE_ = char_strs SCREAMING_SNAKE_CASE_ = bpe_strs SCREAMING_SNAKE_CASE_ = wp_strs return out def __A ( self : int , __magic_name__ : List[Any] , __magic_name__ : str ) -> Any: if format == DecodeType.CHARACTER: SCREAMING_SNAKE_CASE_ = self.char_decode SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = "[s]" elif format == DecodeType.BPE: SCREAMING_SNAKE_CASE_ = self.bpe_decode SCREAMING_SNAKE_CASE_ = 2 SCREAMING_SNAKE_CASE_ = "#" elif format == DecodeType.WORDPIECE: SCREAMING_SNAKE_CASE_ = self.wp_decode SCREAMING_SNAKE_CASE_ = 102 SCREAMING_SNAKE_CASE_ = "[SEP]" else: raise ValueError(F'''Format {format} is not supported.''' ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = [], [] SCREAMING_SNAKE_CASE_ = pred_logits.size(0 ) SCREAMING_SNAKE_CASE_ = pred_logits.size(1 ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = pred_logits.topk(1 , dim=-1 , largest=__magic_name__ , sorted=__magic_name__ ) SCREAMING_SNAKE_CASE_ = preds_index.view(-1 , __magic_name__ )[:, 1:] SCREAMING_SNAKE_CASE_ = decoder(__magic_name__ ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = torch.nn.functional.softmax(__magic_name__ , dim=2 ).max(dim=2 ) SCREAMING_SNAKE_CASE_ = preds_max_prob[:, 1:] for index in range(__magic_name__ ): SCREAMING_SNAKE_CASE_ = preds_str[index].find(__magic_name__ ) SCREAMING_SNAKE_CASE_ = preds_str[index][:pred_eos] SCREAMING_SNAKE_CASE_ = preds_index[index].cpu().tolist() SCREAMING_SNAKE_CASE_ = pred_index.index(__magic_name__ ) if eos_token in pred_index else -1 SCREAMING_SNAKE_CASE_ = preds_max_prob[index][: pred_eos_index + 1] SCREAMING_SNAKE_CASE_ = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(__magic_name__ ) conf_scores.append(__magic_name__ ) return dec_strs, conf_scores def __A ( self : Any , __magic_name__ : Dict ) -> List[str]: SCREAMING_SNAKE_CASE_ = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(__magic_name__ )] return decode_strs def __A ( self : Any , __magic_name__ : Union[str, Any] ) -> Tuple: return self.bpe_tokenizer.batch_decode(__magic_name__ ) def __A ( self : str , __magic_name__ : Dict ) -> List[Any]: SCREAMING_SNAKE_CASE_ = [seq.replace(" " , "" ) for seq in 
self.wp_tokenizer.batch_decode(__magic_name__ )] return decode_strs
118
1
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path A__ = Path(__file__).resolve().parents[3] / '''src''' sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(42) A__ = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''} A__ = '''zero2''' A__ = '''zero3''' A__ = [ZEROa, ZEROa] def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int: """simple docstring""" snake_case__ : List[Any] = parameterized.to_safe_name('''_'''.join(str(__lowerCAmelCase ) for x in param.args ) ) return f"""{func.__name__}_{param_based_name}""" # Cartesian-product of zero stages with models to test A__ = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class a ( __lowerCamelCase ): @parameterized.expand(__lowercase ,name_func=__lowercase ) def __lowerCamelCase ( self :Tuple ,__lowercase :List[str] ,__lowercase :int ): self.run_and_check( stage=__lowercase ,model=__lowercase ,distributed=__lowercase ,fpaa=__lowercase ,) @require_torch_multi_gpu @parameterized.expand(__lowercase ,name_func=__lowercase ) def __lowerCamelCase ( self :Tuple ,__lowercase :Any ,__lowercase :Union[str, Any] ): self.run_and_check( stage=__lowercase ,model=__lowercase ,distributed=__lowercase ,fpaa=__lowercase ,) @parameterized.expand(__lowercase ,name_func=__lowercase ) def __lowerCamelCase ( self :Any ,__lowercase :Dict ,__lowercase :str ): self.run_and_check( stage=__lowercase ,model=__lowercase ,distributed=__lowercase ,fpaa=__lowercase ,) @require_torch_multi_gpu @parameterized.expand(__lowercase ,name_func=__lowercase ) def __lowerCamelCase ( self :Optional[int] ,__lowercase :Optional[Any] ,__lowercase :str ): self.run_and_check( stage=__lowercase ,model=__lowercase ,distributed=__lowercase ,fpaa=__lowercase ,) def __lowerCamelCase ( self :Tuple ,__lowercase :Optional[Any] ): # XXX: run_asr is premature and doesn't save any results # so all we check for now is that the process didn't fail pass def __lowerCamelCase ( self :Dict ,__lowercase :str ,__lowercase :str ,__lowercase :int = 1_0 ,__lowercase :bool = True ,__lowercase :bool = True ,__lowercase :bool = True ,): snake_case__ : Union[str, Any] = models[model] snake_case__ : str = self.run_trainer( stage=__lowercase ,model_name=__lowercase ,eval_steps=__lowercase ,num_train_epochs=1 ,distributed=__lowercase ,fpaa=__lowercase ,) self.do_checks(__lowercase ) return output_dir def __lowerCamelCase ( self :List[str] ,__lowercase :str ,__lowercase :str ,__lowercase :int = 1_0 ,__lowercase :int = 1 ,__lowercase :bool = True ,__lowercase :bool = True ,): snake_case__ : int = self.get_auto_remove_tmp_dir('''./xxx''' ,after=__lowercase ) snake_case__ : Optional[Any] = F""" --model_name_or_path 
{model_name} --dataset_name hf-internal-testing/librispeech_asr_dummy --dataset_config_name clean --train_split_name validation --validation_split_name validation --output_dir {output_dir} --num_train_epochs {str(__lowercase )} --per_device_train_batch_size 2 --per_device_eval_batch_size 2 --evaluation_strategy steps --learning_rate 5e-4 --warmup_steps 8 --orthography timit --preprocessing_num_workers 1 --group_by_length --freeze_feature_extractor --report_to none --save_steps 0 --eval_steps {eval_steps} --report_to none """.split() if fpaa: args.extend(['''--fp16'''] ) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files snake_case__ : Dict = F"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split() snake_case__ : Tuple = [F"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""] snake_case__ : Union[str, Any] = self.get_launcher(__lowercase ) snake_case__ : str = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(__lowercase ,env=self.get_env() ) return output_dir def __lowerCamelCase ( self :Optional[Any] ,__lowercase :Union[str, Any]=False ): # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup # - it won't be able to handle that # 2. for now testing with just 2 gpus max (since some quality tests may give different # results with mode gpus because we use very little data) snake_case__ : Dict = min(2 ,get_gpu_count() ) if distributed else 1 return F"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
358
import argparse
from collections import defaultdict


def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"

    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0

    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct_filename, fail_filename=None):
    if fail_filename is not None:
        with open(fail_filename, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct_filename, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()

    main(args.correct_filename, args.fail_filename)
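For reference, a minimal sketch of the line format the `--correct_filename` input is expected to use; the file, class, and test names below are hypothetical:

# Hypothetical example of one input line: "<test file>;<test class>;<test name>;<corrected line>".
example = "tests/models/foo/test_modeling_foo.py;FooModelTest;test_inference;expected_slice = torch.tensor([0.1, 0.2])\n"
file, class_name, test_name, correct_line = example.split(";")
assert test_name == "test_inference"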
44
0
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    """Compute the Adler-32 checksum of a string, using ord() over its characters."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
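A cross-check of the checksum against the standard library; "Wikipedia" is the classic Adler-32 test vector, and the zlib comparison assumes ASCII input so that ord() matches the byte values zlib hashes:

import zlib

# Adler-32("Wikipedia") is the well-known reference value 300286872 (0x11E60398).
assert adler32("Wikipedia") == 300286872
assert adler32("Wikipedia") == zlib.adler32(b"Wikipedia")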
140
import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version _UpperCAmelCase = version.parse(importlib_metadata.version("""nltk""")) if NLTK_VERSION >= version.Version("""3.6.4"""): from nltk import word_tokenize _UpperCAmelCase = """\ @inproceedings{banarjee2005, title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments}, author = {Banerjee, Satanjeev and Lavie, Alon}, booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization}, month = jun, year = {2005}, address = {Ann Arbor, Michigan}, publisher = {Association for Computational Linguistics}, url = {https://www.aclweb.org/anthology/W05-0909}, pages = {65--72}, } """ _UpperCAmelCase = """\ METEOR, an automatic metric for machine translation evaluation that is based on a generalized concept of unigram matching between the machine-produced translation and human-produced reference translations. Unigrams can be matched based on their surface forms, stemmed forms, and meanings; furthermore, METEOR can be easily extended to include more advanced matching strategies. Once all generalized unigram matches between the two strings have been found, METEOR computes a score for this matching using a combination of unigram-precision, unigram-recall, and a measure of fragmentation that is designed to directly capture how well-ordered the matched words in the machine translation are in relation to the reference. METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic data and 0.331 on the Chinese data. This is shown to be an improvement on using simply unigram-precision, unigram-recall and their harmonic F1 combination. """ _UpperCAmelCase = """ Computes METEOR score of translated segments against one or more references. Args: predictions: list of predictions to score. Each prediction should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. alpha: Parameter for controlling relative weights of precision and recall. default: 0.9 beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3 gamma: Relative weight assigned to fragmentation penalty. default: 0.5 Returns: 'meteor': meteor score. 
Examples:

    >>> meteor = datasets.load_metric('meteor')
    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
    >>> results = meteor.compute(predictions=predictions, references=references)
    >>> print(round(results["meteor"], 4))
    0.6944
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
140
1
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Flip two qubits with X gates, measure them, and return the result counts."""
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)

    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"Total count for various states are: {counts}")
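Because both qubits are deterministically flipped to |1> before measurement, every shot reads "11"; a minimal check of that outcome (assuming qiskit-aer is installed):

# Both qubits are in |1> after the X gates, so all 1000 shots measure "11".
counts = single_qubit_measure(2, 2)
assert counts == {"11": 1000}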
358
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse

from ...utils.dataclasses import (
    ComputeEnvironment,
    DistributedType,
    DynamoBackend,
    PrecisionType,
    SageMakerDistributedType,
)
from ..menu import BulletMenu


DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]


def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
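A minimal sketch of how these converters behave; the indices and strings come from the lists defined above:

# The converters turn a menu index or a yes/no answer into the corresponding enum value or bool.
assert _convert_yes_no_to_bool("Yes") is True
assert _convert_yes_no_to_bool("no") is False
assert _convert_mixed_precision(1).value == "fp16"  # index 1 in ["no", "fp16", "bf16", "fp8"]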
113
0
"""simple docstring""" # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers A: Optional[int] = "3" print("Python version:", sys.version) print("transformers version:", transformers.__version__) try: import torch print("Torch version:", torch.__version__) print("Cuda available:", torch.cuda.is_available()) print("Cuda version:", torch.version.cuda) print("CuDNN version:", torch.backends.cudnn.version()) print("Number of GPUs available:", torch.cuda.device_count()) print("NCCL version:", torch.cuda.nccl.version()) except ImportError: print("Torch version:", None) try: import deepspeed print("DeepSpeed version:", deepspeed.__version__) except ImportError: print("DeepSpeed version:", None) try: import tensorflow as tf print("TensorFlow version:", tf.__version__) print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU"))) print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU"))) except ImportError: print("TensorFlow version:", None)
109
from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch __lowerCAmelCase : int = logging.get_logger(__name__) class __lowerCAmelCase ( lowerCAmelCase_ ): """simple docstring""" A__ : Optional[Any] = ['''pixel_values'''] def __init__( self : Dict , _snake_case : bool = True , _snake_case : Optional[Dict[str, int]] = None , _snake_case : PILImageResampling = PILImageResampling.BILINEAR , _snake_case : bool = True , _snake_case : Dict[str, int] = None , _snake_case : bool = True , _snake_case : Union[int, float] = 1 / 255 , _snake_case : bool = True , _snake_case : Optional[Union[float, List[float]]] = None , _snake_case : Optional[Union[float, List[float]]] = None , **_snake_case : Tuple , ): super().__init__(**_snake_case ) __lowercase : List[Any] = size if size is not None else {'''shortest_edge''': 256} __lowercase : Dict = get_size_dict(_snake_case , default_to_square=_snake_case ) __lowercase : Dict = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} __lowercase : Any = get_size_dict(_snake_case , param_name='''crop_size''' ) __lowercase : int = do_resize __lowercase : Union[str, Any] = size __lowercase : Optional[int] = resample __lowercase : str = do_center_crop __lowercase : str = crop_size __lowercase : Optional[int] = do_rescale __lowercase : str = rescale_factor __lowercase : List[Any] = do_normalize __lowercase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __lowercase : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD def snake_case_ ( self : List[str] , _snake_case : np.ndarray , _snake_case : Dict[str, int] , _snake_case : PILImageResampling = PILImageResampling.BICUBIC , _snake_case : Optional[Union[str, ChannelDimension]] = None , **_snake_case : Any , ): __lowercase : Union[str, Any] = get_size_dict(_snake_case , default_to_square=_snake_case ) if "shortest_edge" not in size: raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) __lowercase : Dict = get_resize_output_image_size(_snake_case , size=size['''shortest_edge'''] , default_to_square=_snake_case ) return resize(_snake_case , size=_snake_case , resample=_snake_case , data_format=_snake_case , **_snake_case ) def snake_case_ ( self : Union[str, Any] , _snake_case : np.ndarray , _snake_case : Dict[str, int] , _snake_case : Optional[Union[str, ChannelDimension]] = None , **_snake_case : str , ): __lowercase : str = get_size_dict(_snake_case ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. 
Got {size.keys()}' ) return center_crop(_snake_case , size=(size['''height'''], size['''width''']) , data_format=_snake_case , **_snake_case ) def snake_case_ ( self : str , _snake_case : np.ndarray , _snake_case : float , _snake_case : Optional[Union[str, ChannelDimension]] = None , **_snake_case : Any ): return rescale(_snake_case , scale=_snake_case , data_format=_snake_case , **_snake_case ) def snake_case_ ( self : Any , _snake_case : np.ndarray , _snake_case : Union[float, List[float]] , _snake_case : Union[float, List[float]] , _snake_case : Optional[Union[str, ChannelDimension]] = None , **_snake_case : str , ): return normalize(_snake_case , mean=_snake_case , std=_snake_case , data_format=_snake_case , **_snake_case ) def snake_case_ ( self : Optional[Any] , _snake_case : ImageInput , _snake_case : Optional[bool] = None , _snake_case : Dict[str, int] = None , _snake_case : PILImageResampling = None , _snake_case : bool = None , _snake_case : Dict[str, int] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[float] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[Union[float, List[float]]] = None , _snake_case : Optional[Union[float, List[float]]] = None , _snake_case : Optional[Union[str, TensorType]] = None , _snake_case : Union[str, ChannelDimension] = ChannelDimension.FIRST , **_snake_case : int , ): __lowercase : List[str] = do_resize if do_resize is not None else self.do_resize __lowercase : str = size if size is not None else self.size __lowercase : Any = get_size_dict(_snake_case , default_to_square=_snake_case ) __lowercase : str = resample if resample is not None else self.resample __lowercase : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop __lowercase : List[str] = crop_size if crop_size is not None else self.crop_size __lowercase : int = get_size_dict(_snake_case , param_name='''crop_size''' ) __lowercase : List[Any] = do_rescale if do_rescale is not None else self.do_rescale __lowercase : str = rescale_factor if rescale_factor is not None else self.rescale_factor __lowercase : Any = do_normalize if do_normalize is not None else self.do_normalize __lowercase : Union[str, Any] = image_mean if image_mean is not None else self.image_mean __lowercase : Dict = image_std if image_std is not None else self.image_std __lowercase : Any = make_list_of_images(_snake_case ) if not valid_images(_snake_case ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
__lowercase : List[str] = [to_numpy_array(_snake_case ) for image in images] if do_resize: __lowercase : str = [self.resize(image=_snake_case , size=_snake_case , resample=_snake_case ) for image in images] if do_center_crop: __lowercase : Dict = [self.center_crop(image=_snake_case , size=_snake_case ) for image in images] if do_rescale: __lowercase : List[str] = [self.rescale(image=_snake_case , scale=_snake_case ) for image in images] if do_normalize: __lowercase : str = [self.normalize(image=_snake_case , mean=_snake_case , std=_snake_case ) for image in images] __lowercase : Dict = [to_channel_dimension_format(_snake_case , _snake_case ) for image in images] __lowercase : List[str] = {'''pixel_values''': images} return BatchFeature(data=_snake_case , tensor_type=_snake_case ) def snake_case_ ( self : Optional[int] , _snake_case : str , _snake_case : List[Tuple] = None ): __lowercase : Tuple = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(_snake_case ) != len(_snake_case ): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' ) if is_torch_tensor(_snake_case ): __lowercase : str = target_sizes.numpy() __lowercase : Union[str, Any] = [] for idx in range(len(_snake_case ) ): __lowercase : Union[str, Any] = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_snake_case ) __lowercase : Tuple = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(_snake_case ) else: __lowercase : str = logits.argmax(dim=1 ) __lowercase : List[str] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
156
0
import argparse
import os
import pickle
import sys

import torch

from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()

# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils


def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--tf_checkpoint_path",
        default="",
        type=str,
        help="An optional path to a TensorFlow checkpoint path to be converted.",
    )
    parser.add_argument(
        "--transfo_xl_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--transfo_xl_dataset_file",
        default="",
        type=str,
        help="An optional dataset file to be converted in a vocabulary.",
    )
    args = parser.parse_args()
    convert_transfo_xl_checkpoint_to_pytorch(
        args.tf_checkpoint_path,
        args.transfo_xl_config_file,
        args.pytorch_dump_folder_path,
        args.transfo_xl_dataset_file,
    )
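A minimal usage sketch calling the converter directly rather than through the CLI; all paths here are hypothetical:

# Hypothetical paths; with an empty tf_checkpoint_path only the dataset/vocab branch runs.
convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path="",
    transfo_xl_config_file="",
    pytorch_dump_folder_path="./transfo-xl-dump",
    transfo_xl_dataset_file="./corpus-info.pkl",
)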
216
import warnings from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401 warnings.warn( '''The `inpainting.py` script is outdated. Please use directly `from diffusers import''' ''' StableDiffusionInpaintPipeline` instead.''' )
216
1