Dataset schema (preview):

column                    type     range
code                      string   lengths 87 - 55.2k
code_codestyle            int64    0 - 349
style_context             string   lengths 135 - 49.1k
style_context_codestyle   int64    0 - 349
label                     int64    0 - 1
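Assuming this preview is served from a Hugging Face dataset repository (the identifier below is a placeholder, not the real dataset name), a minimal sketch of loading it with the `datasets` library and reading the columns listed above:

    from datasets import load_dataset

    # "user/code-style-pairs" is hypothetical; substitute the actual dataset id
    ds = load_dataset("user/code-style-pairs", split="train")
    row = ds[0]
    print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
    print(row["code"][:200])  # first 200 characters of the raw source string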
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING SCREAMING_SNAKE_CASE__:int = logging.get_logger(__name__) class snake_case__ ( snake_case_ ): _snake_case : Optional[int] = """upernet""" def __init__( self , lowerCamelCase=None , lowerCamelCase=512 , lowerCamelCase=0.02 , lowerCamelCase=[1, 2, 3, 6] , lowerCamelCase=True , lowerCamelCase=0.4 , lowerCamelCase=384 , lowerCamelCase=256 , lowerCamelCase=1 , lowerCamelCase=False , lowerCamelCase=255 , **lowerCamelCase , ): super().__init__(**lowerCamelCase ) if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." ) __a = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] ) elif isinstance(lowerCamelCase , lowerCamelCase ): __a = backbone_config.get("model_type" ) __a = CONFIG_MAPPING[backbone_model_type] __a = config_class.from_dict(lowerCamelCase ) __a = backbone_config __a = hidden_size __a = initializer_range __a = pool_scales __a = use_auxiliary_head __a = auxiliary_loss_weight __a = auxiliary_in_channels __a = auxiliary_channels __a = auxiliary_num_convs __a = auxiliary_concat_input __a = loss_ignore_index def a__ ( self ): __a = copy.deepcopy(self.__dict__ ) __a = self.backbone_config.to_dict() __a = self.__class__.model_type return output
code_codestyle: 261
"""simple docstring""" import itertools import random import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin SCREAMING_SNAKE_CASE__:Any = random.Random() if is_torch_available(): import torch def _lowerCamelCase( a , a=1.0 , a=None , a=None ): if rng is None: __a = global_rng __a = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class snake_case__ ( unittest.TestCase ): def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=400 , lowerCamelCase=2000 , lowerCamelCase=1 , lowerCamelCase=0.0 , lowerCamelCase=16000 , lowerCamelCase=True , lowerCamelCase=True , ): __a = parent __a = batch_size __a = min_seq_length __a = max_seq_length __a = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) __a = feature_size __a = padding_value __a = sampling_rate __a = return_attention_mask __a = do_normalize def a__ ( self ): return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def a__ ( self , lowerCamelCase=False , lowerCamelCase=False ): def _flatten(lowerCamelCase ): return list(itertools.chain(*lowerCamelCase ) ) if equal_length: __a = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size __a = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: __a = [np.asarray(lowerCamelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class snake_case__ ( snake_case_, unittest.TestCase ): _snake_case : str = ASTFeatureExtractor def a__ ( self ): __a = ASTFeatureExtractionTester(self ) def a__ ( self ): # Tests that all call wrap to encode_plus and batch_encode_plus __a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 __a = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __a = [np.asarray(lowerCamelCase ) for speech_input in speech_inputs] # Test not batched input __a = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values __a = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) ) # Test batched __a = feat_extract(lowerCamelCase , padding=lowerCamelCase , return_tensors="np" ).input_values __a = feat_extract(lowerCamelCase , padding=lowerCamelCase , return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ): self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. 
__a = [floats_list((1, x) )[0] for x in (800, 800, 800)] __a = np.asarray(lowerCamelCase ) __a = feat_extract(lowerCamelCase , return_tensors="np" ).input_values __a = feat_extract(lowerCamelCase , return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ): self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) ) @require_torch def a__ ( self ): import torch __a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __a = np.random.rand(100 ).astype(np.floataa ) __a = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: __a = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) __a = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def a__ ( self , lowerCamelCase ): from datasets import load_dataset __a = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" ) # automatic decoding with librispeech __a = ds.sort("id" ).select(range(lowerCamelCase ) )[:num_samples]["audio"] return [x["array"] for x in speech_samples] @require_torch def a__ ( self ): # fmt: off __a = torch.tensor( [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776, -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133, -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936, -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] ) # fmt: on __a = self._load_datasamples(1 ) __a = ASTFeatureExtractor() __a = feature_extractor(lowerCamelCase , return_tensors="pt" ).input_values self.assertEquals(input_values.shape , (1, 1024, 128) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCamelCase , atol=1E-4 ) )
style_context_codestyle: 261
label: 1
"""simple docstring""" from itertools import count def _lowerCamelCase( a = 5_0 ): __a = [1] * min_block_length for n in count(a ): fill_count_functions.append(1 ) for block_length in range(a , n + 1 ): for block_start in range(n - block_length ): fill_count_functions[n] += fill_count_functions[ n - block_start - block_length - 1 ] fill_count_functions[n] += 1 if fill_count_functions[n] > 1_0_0_0_0_0_0: break return n if __name__ == "__main__": print(F'''{solution() = }''')
code_codestyle: 261
"""simple docstring""" from typing import Optional, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin class snake_case__ ( snake_case_, snake_case_ ): @register_to_config def __init__( self , lowerCamelCase = 768 , ): super().__init__() __a = nn.Parameter(torch.zeros(1 , lowerCamelCase ) ) __a = nn.Parameter(torch.ones(1 , lowerCamelCase ) ) def a__ ( self , lowerCamelCase = None , lowerCamelCase = None , ): __a = nn.Parameter(self.mean.to(lowerCamelCase ).to(lowerCamelCase ) ) __a = nn.Parameter(self.std.to(lowerCamelCase ).to(lowerCamelCase ) ) return self def a__ ( self , lowerCamelCase ): __a = (embeds - self.mean) * 1.0 / self.std return embeds def a__ ( self , lowerCamelCase ): __a = (embeds * self.std) + self.mean return embeds
style_context_codestyle: 261
label: 1
"""simple docstring""" from __future__ import annotations def _lowerCamelCase( a ): # preprocessing the first row for i in range(1 , len(matrix[0] ) ): matrix[0][i] += matrix[0][i - 1] # preprocessing the first column for i in range(1 , len(a ) ): matrix[i][0] += matrix[i - 1][0] # updating the path cost for current position for i in range(1 , len(a ) ): for j in range(1 , len(matrix[0] ) ): matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] ) return matrix[-1][-1] if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 261
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available SCREAMING_SNAKE_CASE__:List[str] = { """configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__:Dict = [ """LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""", """LongT5EncoderModel""", """LongT5ForConditionalGeneration""", """LongT5Model""", """LongT5PreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__:Dict = [ """FlaxLongT5ForConditionalGeneration""", """FlaxLongT5Model""", """FlaxLongT5PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longta import ( LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST, LongTaEncoderModel, LongTaForConditionalGeneration, LongTaModel, LongTaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_longta import ( FlaxLongTaForConditionalGeneration, FlaxLongTaModel, FlaxLongTaPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__:Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
style_context_codestyle: 261
label: 1
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import ( AudioDiffusionPipeline, AutoencoderKL, DDIMScheduler, DDPMScheduler, DiffusionPipeline, Mel, UNetaDConditionModel, UNetaDModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class snake_case__ ( unittest.TestCase ): def a__ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def a__ ( self ): torch.manual_seed(0 ) __a = UNetaDModel( sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , ) return model @property def a__ ( self ): torch.manual_seed(0 ) __a = UNetaDConditionModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , ) return model @property def a__ ( self ): torch.manual_seed(0 ) __a = AutoencoderKL( sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , ) __a = UNetaDModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , ) return vqvae, unet @slow def a__ ( self ): __a = "cpu" # ensure determinism for the device-dependent torch.Generator __a = Mel( x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , ) __a = DDPMScheduler() __a = AudioDiffusionPipeline(vqvae=lowerCamelCase , unet=self.dummy_unet , mel=lowerCamelCase , scheduler=lowerCamelCase ) __a = pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) __a = torch.Generator(device=lowerCamelCase ).manual_seed(42 ) __a = pipe(generator=lowerCamelCase , steps=4 ) __a = output.audios[0] __a = output.images[0] __a = torch.Generator(device=lowerCamelCase ).manual_seed(42 ) __a = pipe(generator=lowerCamelCase , steps=4 , return_dict=lowerCamelCase ) __a = output[0][0] assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length) assert ( image.height == self.dummy_unet.config.sample_size[0] and image.width == self.dummy_unet.config.sample_size[1] ) __a = np.frombuffer(image.tobytes() , dtype="uint8" )[:10] __a = np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10] __a = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0 __a = Mel( x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , ) __a = DDIMScheduler() __a = self.dummy_vqvae_and_unet __a = AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=lowerCamelCase , scheduler=lowerCamelCase ) __a = pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) np.random.seed(0 ) __a = np.random.uniform(-1 , 1 , 
((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) ) __a = torch.Generator(device=lowerCamelCase ).manual_seed(42 ) __a = pipe(raw_audio=lowerCamelCase , generator=lowerCamelCase , start_step=5 , steps=10 ) __a = output.images[0] assert ( image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0] and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1] ) __a = np.frombuffer(image.tobytes() , dtype="uint8" )[:10] __a = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 __a = self.dummy_unet_condition __a = AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=lowerCamelCase , mel=lowerCamelCase , scheduler=lowerCamelCase ) __a = pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) np.random.seed(0 ) __a = torch.rand((1, 1, 10) ) __a = pipe(generator=lowerCamelCase , encoding=lowerCamelCase ) __a = output.images[0] __a = np.frombuffer(image.tobytes() , dtype="uint8" )[:10] __a = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 @slow @require_torch_gpu class snake_case__ ( unittest.TestCase ): def a__ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self ): __a = torch_device __a = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" ) __a = pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) __a = torch.Generator(device=lowerCamelCase ).manual_seed(42 ) __a = pipe(generator=lowerCamelCase ) __a = output.audios[0] __a = output.images[0] assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length) assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1] __a = np.frombuffer(image.tobytes() , dtype="uint8" )[:10] __a = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
code_codestyle: 261
"""simple docstring""" import argparse import collections import torch from flax import traverse_util from tax import checkpoints from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def _lowerCamelCase( a , a , a , a="attention" ): __a = params[F"{prefix}/layers_{i}/{layer_name}/key/kernel"] __a = params[F"{prefix}/layers_{i}/{layer_name}/out/kernel"] __a = params[F"{prefix}/layers_{i}/{layer_name}/query/kernel"] __a = params[F"{prefix}/layers_{i}/{layer_name}/value/kernel"] return k, o, q, v def _lowerCamelCase( a , a , a , a=False ): if split_mlp_wi: __a = params[F"{prefix}/layers_{i}/mlp/wi_0/kernel"] __a = params[F"{prefix}/layers_{i}/mlp/wi_1/kernel"] __a = (wi_a, wi_a) else: __a = params[F"{prefix}/layers_{i}/mlp/wi/kernel"] __a = params[F"{prefix}/layers_{i}/mlp/wo/kernel"] return wi, wo def _lowerCamelCase( a , a , a , a ): return params[F"{prefix}/layers_{i}/{layer_name}/scale"] def _lowerCamelCase( a , *, a , a ): __a = traverse_util.flatten_dict(variables["target"] ) __a = {"/".join(a ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi __a = "encoder/layers_0/mlp/wi_0/kernel" in old print("Split MLP:" , a ) __a = collections.OrderedDict() # Shared embeddings. __a = old["token_embedder/embedding"] # Encoder. for i in range(a ): # Block i, layer 0 (Self Attention). __a = tax_layer_norm_lookup(a , a , "encoder" , "pre_attention_layer_norm" ) __a , __a , __a , __a = tax_attention_lookup(a , a , "encoder" , "attention" ) __a = layer_norm __a = k.T __a = o.T __a = q.T __a = v.T # Block i, layer 1 (MLP). __a = tax_layer_norm_lookup(a , a , "encoder" , "pre_mlp_layer_norm" ) __a , __a = tax_mlp_lookup(a , a , "encoder" , a ) __a = layer_norm if split_mlp_wi: __a = wi[0].T __a = wi[1].T else: __a = wi.T __a = wo.T __a = old[ "encoder/relpos_bias/rel_embedding" ].T __a = old["encoder/encoder_norm/scale"] if not is_encoder_only: # Decoder. for i in range(a ): # Block i, layer 0 (Self Attention). __a = tax_layer_norm_lookup(a , a , "decoder" , "pre_self_attention_layer_norm" ) __a , __a , __a , __a = tax_attention_lookup(a , a , "decoder" , "self_attention" ) __a = layer_norm __a = k.T __a = o.T __a = q.T __a = v.T # Block i, layer 1 (Cross Attention). __a = tax_layer_norm_lookup(a , a , "decoder" , "pre_cross_attention_layer_norm" ) __a , __a , __a , __a = tax_attention_lookup(a , a , "decoder" , "encoder_decoder_attention" ) __a = layer_norm __a = k.T __a = o.T __a = q.T __a = v.T # Block i, layer 2 (MLP). __a = tax_layer_norm_lookup(a , a , "decoder" , "pre_mlp_layer_norm" ) __a , __a = tax_mlp_lookup(a , a , "decoder" , a ) __a = layer_norm if split_mlp_wi: __a = wi[0].T __a = wi[1].T else: __a = wi.T __a = wo.T __a = old["decoder/decoder_norm/scale"] __a = old[ "decoder/relpos_bias/rel_embedding" ].T # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: __a = old["decoder/logits_dense/kernel"].T return new def _lowerCamelCase( a , a ): __a = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: __a = state_dict["shared.weight"] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: __a = state_dict["shared.weight"] if "lm_head.weight" not in state_dict: # For old 1.0 models. print("Using shared word embeddings as lm_head." 
) __a = state_dict["shared.weight"] return state_dict def _lowerCamelCase( a , a , a , a ): __a = checkpoints.load_tax_checkpoint(a ) __a = convert_tax_to_pytorch(a , num_layers=config.num_layers , is_encoder_only=a ) __a = make_state_dict(a , a ) model.load_state_dict(a , strict=a ) def _lowerCamelCase( a , a , a , a = False ): __a = TaConfig.from_json_file(a ) print(F"Building PyTorch model from configuration: {config}" ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: __a = TaEncoderModel(a ) else: __a = TaForConditionalGeneration(a ) # Load weights from tf checkpoint load_tax_weights_in_ta(a , a , a , a ) # Save pytorch-model print(F"Save PyTorch model to {pytorch_dump_path}" ) model.save_pretrained(a ) # Verify that we can load the checkpoint. model.from_pretrained(a ) print("Done" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__:Tuple = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""") # Required parameters parser.add_argument( """--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False ) SCREAMING_SNAKE_CASE__:Tuple = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only )
style_context_codestyle: 261
label: 1
"""simple docstring""" import os try: from .build_directory_md import good_file_paths except ImportError: from build_directory_md import good_file_paths # type: ignore SCREAMING_SNAKE_CASE__:Any = list(good_file_paths()) assert filepaths, "good_file_paths() failed!" SCREAMING_SNAKE_CASE__:Any = [file for file in filepaths if file != file.lower()] if upper_files: print(F'''{len(upper_files)} files contain uppercase characters:''') print("""\n""".join(upper_files) + """\n""") SCREAMING_SNAKE_CASE__:List[Any] = [file for file in filepaths if """ """ in file] if space_files: print(F'''{len(space_files)} files contain space characters:''') print("""\n""".join(space_files) + """\n""") SCREAMING_SNAKE_CASE__:Any = [file for file in filepaths if """-""" in file] if hyphen_files: print(F'''{len(hyphen_files)} files contain hyphen characters:''') print("""\n""".join(hyphen_files) + """\n""") SCREAMING_SNAKE_CASE__:str = [file for file in filepaths if os.sep not in file] if nodir_files: print(F'''{len(nodir_files)} files are not in a directory:''') print("""\n""".join(nodir_files) + """\n""") SCREAMING_SNAKE_CASE__:Any = len(upper_files + space_files + hyphen_files + nodir_files) if bad_files: import sys sys.exit(bad_files)
code_codestyle: 261
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ): _snake_case : str = StableUnCLIPImgaImgPipeline _snake_case : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS _snake_case : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS _snake_case : Optional[Any] = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess _snake_case : List[Any] = frozenset([] ) def a__ ( self ): __a = 32 __a = embedder_hidden_size # image encoding components __a = CLIPImageProcessor(crop_size=32 , size=32 ) torch.manual_seed(0 ) __a = CLIPVisionModelWithProjection( CLIPVisionConfig( hidden_size=lowerCamelCase , projection_dim=lowerCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) ) # regular denoising components torch.manual_seed(0 ) __a = StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase ) __a = DDPMScheduler(beta_schedule="squaredcos_cap_v2" ) torch.manual_seed(0 ) __a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) __a = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) __a = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCamelCase , layers_per_block=1 , upcast_attention=lowerCamelCase , use_linear_projection=lowerCamelCase , ) torch.manual_seed(0 ) __a = DDIMScheduler( beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=lowerCamelCase , steps_offset=1 , ) torch.manual_seed(0 ) __a = AutoencoderKL() __a = { # image encoding components "feature_extractor": feature_extractor, "image_encoder": image_encoder.eval(), # image noising components "image_normalizer": image_normalizer.eval(), "image_noising_scheduler": image_noising_scheduler, # regular denoising components "tokenizer": tokenizer, "text_encoder": text_encoder.eval(), "unet": unet.eval(), "scheduler": scheduler, "vae": vae.eval(), } return 
components def a__ ( self , lowerCamelCase , lowerCamelCase=0 , lowerCamelCase=True ): if str(lowerCamelCase ).startswith("mps" ): __a = torch.manual_seed(lowerCamelCase ) else: __a = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase ) __a = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase ) if pil_image: __a = input_image * 0.5 + 0.5 __a = input_image.clamp(0 , 1 ) __a = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() __a = DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0] return { "prompt": "An anime racoon running a marathon", "image": input_image, "generator": generator, "num_inference_steps": 2, "output_type": "np", } @skip_mps def a__ ( self ): __a = "cpu" # ensure determinism for the device-dependent torch.Generator __a = self.get_dummy_components() __a = StableUnCLIPImgaImgPipeline(**lowerCamelCase ) __a = sd_pipe.to(lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase ) __a = self.get_dummy_inputs(lowerCamelCase ) inputs.update({"image_embeds": None} ) __a = sd_pipe(**lowerCamelCase ).images __a = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __a = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def a__ ( self ): __a = torch_device in ["cpu", "mps"] self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase ) def a__ ( self ): __a = torch_device in ["cpu", "mps"] self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def a__ ( self ): self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase ) @slow @require_torch_gpu class snake_case__ ( unittest.TestCase ): def a__ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self ): __a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) __a = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" ) __a = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __a = torch.Generator(device="cpu" ).manual_seed(0 ) __a = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" ) __a = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase ) def a__ ( self ): __a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) __a = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" ) __a = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) # stable unclip will oom when integration tests are run on a 
V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __a = torch.Generator(device="cpu" ).manual_seed(0 ) __a = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" ) __a = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase ) def a__ ( self ): __a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __a = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa ) __a = pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __a = pipe( lowerCamelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , ) __a = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
style_context_codestyle: 261
label: 1
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy SCREAMING_SNAKE_CASE__:List[Any] = logging.get_logger(__name__) class snake_case__ ( snake_case_ ): def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ): __a = feature_size __a = sampling_rate __a = padding_value __a = kwargs.pop("padding_side" , "right" ) __a = kwargs.pop("return_attention_mask" , lowerCamelCase ) super().__init__(**lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , ): # If we have a list of dicts, let's convert it in a dict of lists # We do this to allow using this method as a collate_fn function in PyTorch Dataloader if isinstance(lowerCamelCase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ): __a = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`" F" to this method that includes {self.model_input_names[0]}, but you provided" F" {list(processed_features.keys() )}" ) __a = processed_features[self.model_input_names[0]] __a = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(lowerCamelCase ) == 0: if return_attention_mask: __a = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch __a = required_input[0] if isinstance(lowerCamelCase , (list, tuple) ): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. __a = 0 while len(required_input[index] ) == 0: index += 1 if index < len(lowerCamelCase ): __a = required_input[index][0] if return_tensors is None: if is_tf_tensor(lowerCamelCase ): __a = "tf" elif is_torch_tensor(lowerCamelCase ): __a = "pt" elif isinstance(lowerCamelCase , (int, float, list, tuple, np.ndarray) ): __a = "np" else: raise ValueError( F"type of {first_element} unknown: {type(lowerCamelCase )}. " "Should be one of a python, numpy, pytorch or tensorflow object." ) for key, value in processed_features.items(): if isinstance(value[0] , (int, float) ): __a = to_numpy(lowerCamelCase ) else: __a = [to_numpy(lowerCamelCase ) for v in value] # Convert padding_strategy in PaddingStrategy __a = self._get_padding_strategies(padding=lowerCamelCase , max_length=lowerCamelCase ) __a = processed_features[self.model_input_names[0]] __a = len(lowerCamelCase ) if not all(len(lowerCamelCase ) == batch_size for v in processed_features.values() ): raise ValueError("Some items in the output dictionary have a different batch size than others." 
) __a = [] for i in range(lowerCamelCase ): __a = {k: v[i] for k, v in processed_features.items()} # truncation __a = self._truncate( lowerCamelCase , max_length=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , truncation=lowerCamelCase , ) truncated_inputs.append(lowerCamelCase ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length __a = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) __a = PaddingStrategy.MAX_LENGTH __a = {} for i in range(lowerCamelCase ): # padding __a = self._pad( truncated_inputs[i] , max_length=lowerCamelCase , padding_strategy=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , ) for key, value in outputs.items(): if key not in batch_outputs: __a = [] if value.dtype is np.dtype(np.floataa ): __a = value.astype(np.floataa ) batch_outputs[key].append(lowerCamelCase ) return BatchFeature(lowerCamelCase , tensor_type=lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = PaddingStrategy.DO_NOT_PAD , lowerCamelCase = None , lowerCamelCase = None , ): __a = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: __a = len(lowerCamelCase ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): __a = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of __a = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowerCamelCase ) < max_length if return_attention_mask and "attention_mask" not in processed_features: __a = np.ones(len(lowerCamelCase ) , dtype=np.intaa ) if needs_to_be_padded: __a = max_length - len(lowerCamelCase ) if self.padding_side == "right": if return_attention_mask: __a = np.pad( processed_features["attention_mask"] , (0, difference) ) __a = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) __a = np.pad( lowerCamelCase , lowerCamelCase , "constant" , constant_values=self.padding_value ) elif self.padding_side == "left": if return_attention_mask: __a = np.pad( processed_features["attention_mask"] , (difference, 0) ) __a = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) __a = np.pad( lowerCamelCase , lowerCamelCase , "constant" , constant_values=self.padding_value ) else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return processed_features def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , ): if not truncation: return processed_features elif truncation and max_length is None: raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." 
) __a = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): __a = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of __a = len(lowerCamelCase ) > max_length if needs_to_be_truncated: __a = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: __a = processed_features["attention_mask"][:max_length] return processed_features def a__ ( self , lowerCamelCase=False , lowerCamelCase=None ): # Get padding strategy if padding is not False: if padding is True: __a = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(lowerCamelCase , lowerCamelCase ): __a = PaddingStrategy(lowerCamelCase ) elif isinstance(lowerCamelCase , lowerCamelCase ): __a = padding else: __a = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( F"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined" ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use" " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." ) return padding_strategy
code_codestyle: 261
"""simple docstring""" import random def _lowerCamelCase( a , a , a ): __a = a[left_index] __a = left_index + 1 for j in range(left_index + 1 , a ): if a[j] < pivot: __a , __a = a[i], a[j] i += 1 __a , __a = a[i - 1], a[left_index] return i - 1 def _lowerCamelCase( a , a , a ): if left < right: __a = random.randint(a , right - 1 ) __a , __a = ( a[left], a[pivot], ) # switches the pivot with the left most bound __a = partition(a , a , a ) quick_sort_random( a , a , a ) # recursive quicksort to the left of the pivot point quick_sort_random( a , pivot_index + 1 , a ) # recursive quicksort to the right of the pivot point def _lowerCamelCase( ): __a = input("Enter numbers separated by a comma:\n" ).strip() __a = [int(a ) for item in user_input.split("," )] quick_sort_random(a , 0 , len(a ) ) print(a ) if __name__ == "__main__": main()
style_context_codestyle: 261
label: 1
"""simple docstring""" import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__:Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__:Union[str, Any] = {"""vocab_file""": """vocab.json"""} SCREAMING_SNAKE_CASE__:List[str] = { """vocab_file""": { """mgp-str""": """https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json""", } } SCREAMING_SNAKE_CASE__:Optional[Any] = {"""mgp-str""": 27} class snake_case__ ( snake_case_ ): _snake_case : Optional[Any] = VOCAB_FILES_NAMES _snake_case : List[str] = PRETRAINED_VOCAB_FILES_MAP _snake_case : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , lowerCamelCase , lowerCamelCase="[GO]" , lowerCamelCase="[GO]" , lowerCamelCase="[s]" , lowerCamelCase="[GO]" , **lowerCamelCase ): super().__init__( unk_token=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , pad_token=lowerCamelCase , **lowerCamelCase , ) with open(lowerCamelCase , encoding="utf-8" ) as vocab_handle: __a = json.load(lowerCamelCase ) __a = {v: k for k, v in self.vocab.items()} @property def a__ ( self ): return len(self.vocab ) def a__ ( self ): return dict(self.vocab , **self.added_tokens_encoder ) def a__ ( self , lowerCamelCase ): __a = [] for s in text: char_tokens.extend(lowerCamelCase ) return char_tokens def a__ ( self , lowerCamelCase ): return self.vocab.get(lowerCamelCase , self.vocab.get(self.unk_token ) ) def a__ ( self , lowerCamelCase ): return self.decoder.get(lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase = None ): if not os.path.isdir(lowerCamelCase ): logger.error("Vocabulary path ({}) should be a directory".format(lowerCamelCase ) ) return __a = os.path.join( lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) with open(lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=lowerCamelCase , ensure_ascii=lowerCamelCase ) + "\n" ) return (vocab_file,)
code_codestyle: 261
"""simple docstring""" from operator import delitem, getitem, setitem import pytest from data_structures.hashing.hash_map import HashMap def _lowerCamelCase( a ): return getitem, k def _lowerCamelCase( a , a ): return setitem, k, v def _lowerCamelCase( a ): return delitem, k def _lowerCamelCase( a , a , *a ): try: return fun(a , *a ), None except Exception as e: return None, e SCREAMING_SNAKE_CASE__:List[Any] = ( _set("""key_a""", """val_a"""), _set("""key_b""", """val_b"""), ) SCREAMING_SNAKE_CASE__:List[Any] = [ _set("""key_a""", """val_a"""), _set("""key_a""", """val_b"""), ] SCREAMING_SNAKE_CASE__:List[Any] = [ _set("""key_a""", """val_a"""), _set("""key_b""", """val_b"""), _del("""key_a"""), _del("""key_b"""), _set("""key_a""", """val_a"""), _del("""key_a"""), ] SCREAMING_SNAKE_CASE__:Any = [ _get("""key_a"""), _del("""key_a"""), _set("""key_a""", """val_a"""), _del("""key_a"""), _del("""key_a"""), _get("""key_a"""), ] SCREAMING_SNAKE_CASE__:int = [ *[_set(x, x) for x in range(5)], # guaranteed upsize ] SCREAMING_SNAKE_CASE__:Any = [ *[_set(x, x) for x in range(5)], # guaranteed upsize *[_del(x) for x in range(5)], _set("""key_a""", """val_b"""), ] @pytest.mark.parametrize( "operations" , ( pytest.param(_add_items , id="add items" ), pytest.param(_overwrite_items , id="overwrite items" ), pytest.param(_delete_items , id="delete items" ), pytest.param(_access_absent_items , id="access absent items" ), pytest.param(_add_with_resize_up , id="add with resize up" ), pytest.param(_add_with_resize_down , id="add with resize down" ), ) , ) def _lowerCamelCase( a ): __a = HashMap(initial_block_size=4 ) __a = {} for _, (fun, *args) in enumerate(a ): __a , __a = _run_operation(a , a , *a ) __a , __a = _run_operation(a , a , *a ) assert my_res == py_res assert str(a ) == str(a ) assert set(a ) == set(a ) assert len(a ) == len(a ) assert set(my.items() ) == set(py.items() ) def _lowerCamelCase( ): def is_public(a ) -> bool: return not name.startswith("_" ) __a = {name for name in dir({} ) if is_public(a )} __a = {name for name in dir(HashMap() ) if is_public(a )} assert dict_public_names > hash_public_names
style_context_codestyle: 261
label: 1
"""simple docstring""" import re def _lowerCamelCase( a ): __a = re.compile( R"^(?:0|94|\+94|0{2}94)" R"7(0|1|2|4|5|6|7|8)" R"(-| |)" R"\d{7}$" ) return bool(re.search(a , a ) ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__:Any = """0094702343221""" print(is_sri_lankan_phone_number(phone))
code_codestyle: 261
"""simple docstring""" import copy import re class snake_case__ : _snake_case : Dict = """hp""" _snake_case : List[str] = {} _snake_case : int = None @classmethod def a__ ( cls , lowerCamelCase , lowerCamelCase ): __a = prefix __a = defaults cls.build_naming_info() @staticmethod def a__ ( lowerCamelCase , lowerCamelCase ): if len(lowerCamelCase ) == 0: return "" __a = None if any(char.isdigit() for char in word ): raise Exception(F"Parameters should not contain numbers: '{word}' contains a number" ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 , len(lowerCamelCase ) + 1 ): __a = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: __a = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(lowerCamelCase ): __a = "" while integer != 0: __a = chr(ord("A" ) + integer % 10 ) + s integer //= 10 return s __a = 0 while True: __a = word + "#" + int_to_alphabetic(lowerCamelCase ) if sword in info["reverse_short_word"]: continue else: __a = sword break __a = short_word __a = word return short_word @staticmethod def a__ ( lowerCamelCase , lowerCamelCase ): __a = param_name.split("_" ) __a = [TrialShortNamer.shortname_for_word(lowerCamelCase , lowerCamelCase ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name __a = ["", "_"] for separator in separators: __a = separator.join(lowerCamelCase ) if shortname not in info["reverse_short_param"]: __a = shortname __a = param_name return shortname return param_name @staticmethod def a__ ( lowerCamelCase , lowerCamelCase ): __a = TrialShortNamer.shortname_for_key(lowerCamelCase , lowerCamelCase ) __a = short_name __a = param_name @classmethod def a__ ( cls ): if cls.NAMING_INFO is not None: return __a = { "short_word": {}, "reverse_short_word": {}, "short_param": {}, "reverse_short_param": {}, } __a = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(lowerCamelCase , lowerCamelCase ) __a = info @classmethod def a__ ( cls , lowerCamelCase ): cls.build_naming_info() assert cls.PREFIX is not None __a = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(F"You should provide a default value for the param name {k} with value {v}" ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue __a = cls.NAMING_INFO["short_param"][k] if isinstance(lowerCamelCase , lowerCamelCase ): __a = 1 if v else 0 __a = "" if isinstance(lowerCamelCase , (int, float) ) else "-" __a = F"{key}{sep}{v}" name.append(lowerCamelCase ) return "_".join(lowerCamelCase ) @classmethod def a__ ( cls , lowerCamelCase ): __a = repr[len(cls.PREFIX ) + 1 :] if repr == "": __a = [] else: __a = repr.split("_" ) __a = {} for value in values: if "-" in value: __a , __a = value.split("-" ) else: __a = re.sub("[0-9.]" , "" , lowerCamelCase ) __a = float(re.sub("[^0-9.]" , "" , lowerCamelCase ) ) __a = cls.NAMING_INFO["reverse_short_param"][p_k] __a = p_v for k in cls.DEFAULTS: if k not in parameters: __a = cls.DEFAULTS[k] return parameters
style_context_codestyle: 261
label: 1
"""simple docstring""" def _lowerCamelCase( a , a ): return numa ^ numa < 0 if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 261
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING SCREAMING_SNAKE_CASE__:int = logging.get_logger(__name__) class snake_case__ ( snake_case_ ): _snake_case : Optional[int] = """upernet""" def __init__( self , lowerCamelCase=None , lowerCamelCase=512 , lowerCamelCase=0.02 , lowerCamelCase=[1, 2, 3, 6] , lowerCamelCase=True , lowerCamelCase=0.4 , lowerCamelCase=384 , lowerCamelCase=256 , lowerCamelCase=1 , lowerCamelCase=False , lowerCamelCase=255 , **lowerCamelCase , ): super().__init__(**lowerCamelCase ) if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." ) __a = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] ) elif isinstance(lowerCamelCase , lowerCamelCase ): __a = backbone_config.get("model_type" ) __a = CONFIG_MAPPING[backbone_model_type] __a = config_class.from_dict(lowerCamelCase ) __a = backbone_config __a = hidden_size __a = initializer_range __a = pool_scales __a = use_auxiliary_head __a = auxiliary_loss_weight __a = auxiliary_in_channels __a = auxiliary_channels __a = auxiliary_num_convs __a = auxiliary_concat_input __a = loss_ignore_index def a__ ( self ): __a = copy.deepcopy(self.__dict__ ) __a = self.backbone_config.to_dict() __a = self.__class__.model_type return output
style_context_codestyle: 261
label: 1
"""simple docstring""" import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class snake_case__ ( unittest.TestCase ): def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=16 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=4 , ): __a = parent __a = batch_size __a = seq_length __a = is_training __a = use_attention_mask __a = use_token_type_ids __a = use_labels __a = vocab_size __a = hidden_size __a = num_hidden_layers __a = num_attention_heads __a = intermediate_size __a = hidden_act __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = max_position_embeddings __a = type_vocab_size __a = type_sequence_label_size __a = initializer_range __a = num_choices def a__ ( self ): __a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a = None if self.use_attention_mask: __a = random_attention_mask([self.batch_size, self.seq_length] ) __a = None if self.use_token_type_ids: __a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __a = RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def a__ ( self ): __a = self.prepare_config_and_inputs() __a , __a , __a , __a = config_and_inputs __a = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict def a__ ( self ): __a = self.prepare_config_and_inputs() __a , __a , __a , __a = config_and_inputs __a = True __a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class snake_case__ ( snake_case_, unittest.TestCase ): _snake_case : Optional[int] = True _snake_case : Dict = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, 
FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def a__ ( self ): __a = FlaxRobertaPreLayerNormModelTester(self ) @slow def a__ ( self ): for model_class_name in self.all_model_classes: __a = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=lowerCamelCase ) __a = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCamelCase ) @require_flax class snake_case__ ( unittest.TestCase ): @slow def a__ ( self ): __a = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=lowerCamelCase ) __a = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.intaa ) __a = model(lowerCamelCase )[0] __a = [1, 11, 50265] self.assertEqual(list(output.shape ) , lowerCamelCase ) # compare the actual values for a slice. __a = np.array( [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) ) @slow def a__ ( self ): __a = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=lowerCamelCase ) __a = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.intaa ) __a = model(lowerCamelCase )[0] # compare the actual values for a slice. __a = np.array( [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) )
code_codestyle: 261
"""simple docstring""" def _lowerCamelCase( a = 1_0_0_0 ): __a = 3 __a = 0 while a < n: if a % 3 == 0 or a % 5 == 0: result += a elif a % 1_5 == 0: result -= a a += 1 return result if __name__ == "__main__": print(F'''{solution() = }''')
style_context_codestyle: 261
label: 1
"""simple docstring""" from typing import List, Optional import numpy as np from ...processing_utils import ProcessorMixin from ...utils import to_numpy class snake_case__ ( snake_case_ ): _snake_case : Union[str, Any] = """EncodecFeatureExtractor""" _snake_case : int = ("""T5Tokenizer""", """T5TokenizerFast""") def __init__( self , lowerCamelCase , lowerCamelCase ): super().__init__(lowerCamelCase , lowerCamelCase ) __a = self.feature_extractor __a = False def a__ ( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=True ): return self.tokenizer.get_decoder_prompt_ids(task=lowerCamelCase , language=lowerCamelCase , no_timestamps=lowerCamelCase ) def __call__( self , *lowerCamelCase , **lowerCamelCase ): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*lowerCamelCase , **lowerCamelCase ) __a = kwargs.pop("audio" , lowerCamelCase ) __a = kwargs.pop("sampling_rate" , lowerCamelCase ) __a = kwargs.pop("text" , lowerCamelCase ) if len(lowerCamelCase ) > 0: __a = args[0] __a = args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process." ) if text is not None: __a = self.tokenizer(lowerCamelCase , **lowerCamelCase ) if audio is not None: __a = self.feature_extractor(lowerCamelCase , *lowerCamelCase , sampling_rate=lowerCamelCase , **lowerCamelCase ) if audio is None: return inputs elif text is None: return audio_inputs else: __a = audio_inputs["input_values"] if "padding_mask" in audio_inputs: __a = audio_inputs["padding_mask"] return inputs def a__ ( self , *lowerCamelCase , **lowerCamelCase ): __a = kwargs.pop("audio" , lowerCamelCase ) __a = kwargs.pop("padding_mask" , lowerCamelCase ) if len(lowerCamelCase ) > 0: __a = args[0] __a = args[1:] if audio_values is not None: return self._decode_audio(lowerCamelCase , padding_mask=lowerCamelCase ) else: return self.tokenizer.batch_decode(*lowerCamelCase , **lowerCamelCase ) def a__ ( self , *lowerCamelCase , **lowerCamelCase ): return self.tokenizer.decode(*lowerCamelCase , **lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase = None ): __a = to_numpy(lowerCamelCase ) __a , __a , __a = audio_values.shape if padding_mask is None: return list(lowerCamelCase ) __a = to_numpy(lowerCamelCase ) # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding** # token (so that the generated audio values are **not** treated as padded tokens) __a = seq_len - padding_mask.shape[-1] __a = 1 - self.feature_extractor.padding_value __a = np.pad(lowerCamelCase , ((0, 0), (0, difference)) , "constant" , constant_values=lowerCamelCase ) __a = audio_values.tolist() for i in range(lowerCamelCase ): __a = np.asarray(audio_values[i] )[ padding_mask[i][None, :] != self.feature_extractor.padding_value ] __a = sliced_audio.reshape(lowerCamelCase , -1 ) return audio_values
code_codestyle: 261
"""simple docstring""" import operator def _lowerCamelCase( a , a = False , a = None ): __a = operator.lt if reverse else operator.gt __a = solution or [] if not arr: return solution __a = [arr.pop(0 )] for i, item in enumerate(a ): if _operator(a , sublist[-1] ): sublist.append(a ) arr.pop(a ) # merging sublist into solution list if not solution: solution.extend(a ) else: while sublist: __a = sublist.pop(0 ) for i, xx in enumerate(a ): if not _operator(a , a ): solution.insert(a , a ) break else: solution.append(a ) strand_sort(a , a , a ) return solution if __name__ == "__main__": assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5] assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
"""simple docstring""" def _lowerCamelCase( a , a = " " ): __a = [] __a = 0 for index, char in enumerate(a ): if char == separator: split_words.append(string[last_index:index] ) __a = index + 1 elif index + 1 == len(a ): split_words.append(string[last_index : index + 1] ) return split_words if __name__ == "__main__": from doctest import testmod testmod()
"""simple docstring""" import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class snake_case__ : def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=50 , lowerCamelCase=0.02 , lowerCamelCase=True , lowerCamelCase=None , ): __a = parent __a = batch_size __a = seq_length __a = is_training __a = use_input_mask __a = vocab_size __a = hidden_size __a = num_hidden_layers __a = num_attention_heads __a = intermediate_size __a = hidden_act __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = max_position_embeddings __a = initializer_range __a = use_labels __a = scope def a__ ( self ): __a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a = None if self.use_input_mask: __a = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: __a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a = self.get_config() return config, input_ids, input_mask, token_labels def a__ ( self ): return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , ) def a__ ( self ): ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) = self.prepare_config_and_inputs() __a = True __a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ): __a = BertGenerationEncoder(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() __a = model(lowerCamelCase , attention_mask=lowerCamelCase ) __a = model(lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ): __a = True __a = BertGenerationEncoder(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() __a = model( lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , ) __a = model( lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a__ ( self , 
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ): __a = True __a = True __a = BertGenerationDecoder(config=lowerCamelCase ).to(lowerCamelCase ).eval() # first forward pass __a = model( lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , use_cache=lowerCamelCase , ) __a = outputs.past_key_values # create hypothetical multiple next tokens and extend to next_input_ids __a = ids_tensor((self.batch_size, 3) , config.vocab_size ) __a = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and attention_mask __a = torch.cat([input_ids, next_tokens] , dim=-1 ) __a = torch.cat([input_mask, next_mask] , dim=-1 ) __a = model( lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , output_hidden_states=lowerCamelCase , )["hidden_states"][0] __a = model( lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , output_hidden_states=lowerCamelCase , )["hidden_states"][0] # select random slice __a = ids_tensor((1,) , output_from_past.shape[-1] ).item() __a = output_from_no_past[:, -3:, random_slice_idx].detach() __a = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) ) def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , *lowerCamelCase , ): __a = BertGenerationDecoder(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() __a = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a__ ( self ): __a , __a , __a , __a = self.prepare_config_and_inputs() __a = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ): _snake_case : Union[str, Any] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () _snake_case : Any = (BertGenerationDecoder,) if is_torch_available() else () _snake_case : Union[str, Any] = ( {"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder} if is_torch_available() else {} ) def a__ ( self ): __a = BertGenerationEncoderTester(self ) __a = ConfigTester(self , config_class=lowerCamelCase , hidden_size=37 ) def a__ ( self ): self.config_tester.run_common_tests() def a__ ( self ): __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase ) def a__ ( self ): __a , __a , __a , __a = self.model_tester.prepare_config_and_inputs() __a = "bert" self.model_tester.create_and_check_model(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) def a__ ( self ): __a = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase ) def a__ ( self ): __a = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCamelCase ) def a__ ( self ): # This regression test was failing with PyTorch < 1.3 ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , (
__a ) , ( __a ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() __a = None self.model_tester.create_and_check_model_as_decoder( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) def a__ ( self ): __a = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase ) @slow def a__ ( self ): __a = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) self.assertIsNotNone(lowerCamelCase ) @require_torch class snake_case__ ( unittest.TestCase ): @slow def a__ ( self ): __a = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) __a = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] ) with torch.no_grad(): __a = model(lowerCamelCase )[0] __a = torch.Size([1, 8, 1024] ) self.assertEqual(output.shape , lowerCamelCase ) __a = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) ) @require_torch class snake_case__ ( unittest.TestCase ): @slow def a__ ( self ): __a = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) __a = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] ) with torch.no_grad(): __a = model(lowerCamelCase )[0] __a = torch.Size([1, 8, 50358] ) self.assertEqual(output.shape , lowerCamelCase ) __a = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) )
"""simple docstring""" from math import pow, sqrt def _lowerCamelCase( *a ): __a = len(a ) > 0 and all(value > 0.0 for value in values ) return result def _lowerCamelCase( a , a ): return ( round(sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(a , a ) else ValueError("Input Error: Molar mass values must greater than 0." ) ) def _lowerCamelCase( a , a , a ): return ( round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(a , a , a ) else ValueError( "Input Error: Molar mass and effusion rate values must greater than 0." ) ) def _lowerCamelCase( a , a , a ): return ( round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(a , a , a ) else ValueError( "Input Error: Molar mass and effusion rate values must greater than 0." ) ) def _lowerCamelCase( a , a , a ): return ( round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 ) if validate(a , a , a ) else ValueError( "Input Error: Molar mass and effusion rate values must greater than 0." ) ) def _lowerCamelCase( a , a , a ): return ( round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 ) if validate(a , a , a ) else ValueError( "Input Error: Molar mass and effusion rate values must greater than 0." ) )
"""simple docstring""" # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401 from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401 deprecate( """stable diffusion controlnet""", """0.22.0""", """Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""", standard_warn=False, stacklevel=3, )
"""simple docstring""" import math def _lowerCamelCase( a , a ): if ( not isinstance(a , (int, float) ) or power_factor < -1 or power_factor > 1 ): raise ValueError("power_factor must be a valid float value between -1 and 1." ) return apparent_power * power_factor def _lowerCamelCase( a , a ): if ( not isinstance(a , (int, float) ) or power_factor < -1 or power_factor > 1 ): raise ValueError("power_factor must be a valid float value between -1 and 1." ) return apparent_power * math.sqrt(1 - power_factor**2 ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import argparse import fairseq import torch from torch import nn from transformers import ( MBartaaTokenizer, MBartConfig, MBartForCausalLM, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() SCREAMING_SNAKE_CASE__:Any = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__:Any = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } SCREAMING_SNAKE_CASE__:Optional[int] = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def _lowerCamelCase( a , a , a , a , a ): for attribute in key.split("." ): __a = getattr(a , a ) if weight_type is not None: __a = getattr(a , a ).shape else: __a = hf_pointer.shape assert hf_shape == value.shape, ( F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" F" {value.shape} for {full_name}" ) if weight_type == "weight": __a = value elif weight_type == "weight_g": __a = value elif weight_type == "weight_v": __a = value elif weight_type == "bias": __a = value else: __a = value logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def _lowerCamelCase( a , a ): __a = [] __a = fairseq_model.state_dict() __a = hf_model.feature_extractor __a = hf_model.adapter for name, value in fairseq_dict.items(): __a = False if "conv_layers" in name: load_conv_layer( a , a , a , a , hf_model.config.feat_extract_norm == "group" , ) __a = True elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."] ): load_adapter(a , a , a , a ) __a = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: __a = True if "*" in mapped_key: __a = name.split(a )[0].split("." )[-2] __a = mapped_key.replace("*" , a ) if "weight_g" in name: __a = "weight_g" elif "weight_v" in name: __a = "weight_v" elif "bias" in name: __a = "bias" elif "weight" in name: __a = "weight" else: __a = None set_recursively(a , a , a , a , a ) continue if not is_used: unused_weights.append(a ) logger.warning(F"Unused weights: {unused_weights}" ) def _lowerCamelCase( a , a , a , a , a ): __a = full_name.split("conv_layers." )[-1] __a = name.split("." 
) __a = int(items[0] ) __a = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) __a = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) __a = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) __a = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." ) __a = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) else: unused_weights.append(a ) def _lowerCamelCase( a , a , a , a ): __a = full_name.split("adaptor." )[-1] __a = name.split("." ) if items[1].isdigit(): __a = int(items[1] ) else: __a = None if "adaptor" not in full_name: if "proj_ln" in full_name: # has to be layer norm if "bias" in name: assert ( value.shape == adapter.proj_layer_norm.bias.data.shape ), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found." __a = value logger.info(F"Adapter proj layer norm bias was initialized from {full_name}." ) if "weight" in name: assert ( value.shape == adapter.proj_layer_norm.weight.data.shape ), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found." __a = value else: # has to be projection layer if "bias" in name: assert ( value.shape == adapter.proj.bias.data.shape ), F"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found." __a = value logger.info(F"Adapter proj layer bias was initialized from {full_name}." ) if "weight" in name: assert ( value.shape == adapter.proj.weight.data.shape ), F"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found." __a = value logger.info(F"Adapter proj layer weight was initialized from {full_name}." ) elif isinstance(a , a ): if "bias" in name: assert ( value.shape == adapter.layers[layer_id].conv.bias.data.shape ), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found." __a = value logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." ) elif "weight" in name: assert ( value.shape == adapter.layers[layer_id].conv.weight.data.shape ), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found." __a = value logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." 
) else: unused_weights.append(a ) def _lowerCamelCase( a ): __a , __a = emb.weight.shape __a = nn.Linear(a , a , bias=a ) __a = emb.weight.data return lin_layer @torch.no_grad() def _lowerCamelCase( a , a , a , a , a , a , a , a , a , a , a , ): __a = WavaVecaConfig.from_pretrained( a , add_adapter=a , adapter_stride=a , adapter_kernel_size=a , use_auth_token=a , output_hidden_size=a , ) __a = MBartConfig.from_pretrained(a ) # load model __a , __a , __a = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={ "config_yaml": config_yaml_path, "data": "/".join(dict_path.split("/" )[:-1] ), "w2v_path": checkpoint_path, "load_pretrained_decoder_from": None, } , ) __a = model[0].eval() # load feature extractor __a = WavaVecaFeatureExtractor.from_pretrained(a , use_auth_token=a ) # set weights for wav2vec2 encoder __a = WavaVecaModel(a ) recursively_load_weights_wavaveca(model.encoder , a ) # load decoder weights __a = MBartForCausalLM(a ) __a , __a = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=a ) logger.warning(F"The following keys are missing when loading the decoder weights: {missing_keys}" ) logger.warning(F"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" ) __a = SpeechEncoderDecoderModel(encoder=a , decoder=a ) __a = False __a = MBartaaTokenizer(a ) tokenizer.save_pretrained(a ) __a = hf_wavavec.config.to_dict() __a = tokenizer.pad_token_id __a = tokenizer.bos_token_id __a = tokenizer.eos_token_id __a = "mbart50" __a = "wav2vec2" __a = tokenizer.eos_token_id __a = 2_5_0_0_0_4 __a = tokenizer.eos_token_id __a = SpeechEncoderDecoderConfig.from_dict(a ) hf_wavavec.save_pretrained(a ) feature_extractor.save_pretrained(a ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__:int = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""") parser.add_argument( """--encoder_config_path""", default="""facebook/wav2vec2-xls-r-1b""", type=str, help="""Path to hf encoder wav2vec2 checkpoint config""", ) parser.add_argument( """--decoder_config_path""", default="""facebook/mbart-large-50-one-to-many-mmt""", type=str, help="""Path to hf decoder checkpoint config""", ) parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whether to add model adapter layers""") parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""") parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""") parser.add_argument("""--encoder_output_dim""", default=1024, type=int, help="""encoder output dim""") parser.add_argument("""--start_token_id""", default=250004, type=int, help="""`decoder_start_token_id` of model config""") SCREAMING_SNAKE_CASE__:List[Any] = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, args.config_yaml_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, add_adapter=args.add_adapter, adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride, decoder_start_token_id=args.start_token_id, encoder_output_dim=args.encoder_output_dim, )
"""simple docstring""" import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class snake_case__ ( unittest.TestCase ): def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=16 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=4 , ): __a = parent __a = batch_size __a = seq_length __a = is_training __a = use_attention_mask __a = use_token_type_ids __a = use_labels __a = vocab_size __a = hidden_size __a = num_hidden_layers __a = num_attention_heads __a = intermediate_size __a = hidden_act __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = max_position_embeddings __a = type_vocab_size __a = type_sequence_label_size __a = initializer_range __a = num_choices def a__ ( self ): __a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a = None if self.use_attention_mask: __a = random_attention_mask([self.batch_size, self.seq_length] ) __a = None if self.use_token_type_ids: __a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __a = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def a__ ( self ): __a = self.prepare_config_and_inputs() __a , __a , __a , __a = config_and_inputs __a = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict @require_flax class snake_case__ ( snake_case_, unittest.TestCase ): _snake_case : Any = True _snake_case : int = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def a__ ( self ): __a = FlaxRoFormerModelTester(self ) @slow def a__ ( self ): for model_class_name in self.all_model_classes: __a = model_class_name.from_pretrained("junnyu/roformer_chinese_small" , from_pt=lowerCamelCase ) __a = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCamelCase ) @require_flax class snake_case__ ( unittest.TestCase ): @slow def a__ ( self ): __a = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" ) __a = jnp.array([[0, 1, 2, 3, 4, 5]] ) __a = model(lowerCamelCase )[0] __a = 50000 __a = (1, 6, vocab_size) self.assertEqual(output.shape , lowerCamelCase ) 
__a = jnp.array( [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) SCREAMING_SNAKE_CASE__:str = { """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""], """processing_trocr""": ["""TrOCRProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__:Tuple = [ """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""", """TrOCRForCausalLM""", """TrOCRPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys SCREAMING_SNAKE_CASE__:List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring""" import pytest from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs @pytest.mark.parametrize( "kwargs, expected" , [ ({"num_shards": 0, "max_num_jobs": 1}, []), ({"num_shards": 1_0, "max_num_jobs": 1}, [range(1_0 )]), ({"num_shards": 1_0, "max_num_jobs": 1_0}, [range(a , i + 1 ) for i in range(1_0 )]), ({"num_shards": 1, "max_num_jobs": 1_0}, [range(1 )]), ({"num_shards": 1_0, "max_num_jobs": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 1_0 )]), ({"num_shards": 3, "max_num_jobs": 1_0}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]), ] , ) def _lowerCamelCase( a , a ): __a = _distribute_shards(**a ) assert out == expected @pytest.mark.parametrize( "gen_kwargs, max_num_jobs, expected" , [ ({"foo": 0}, 1_0, [{"foo": 0}]), ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]), ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]), ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]), ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]), ] , ) def _lowerCamelCase( a , a , a ): __a = _split_gen_kwargs(a , a ) assert out == expected @pytest.mark.parametrize( "gen_kwargs, expected" , [ ({"foo": 0}, 1), ({"shards": [0]}, 1), ({"shards": [0, 1, 2, 3]}, 4), ({"shards": [0, 1, 2, 3], "foo": 0}, 4), ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4), ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError), ] , ) def _lowerCamelCase( a , a ): if expected is RuntimeError: with pytest.raises(a ): _number_of_shards_in_gen_kwargs(a ) else: __a = _number_of_shards_in_gen_kwargs(a ) assert out == expected
"""simple docstring""" import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO, ) SCREAMING_SNAKE_CASE__:Any = logging.getLogger(__name__) def _lowerCamelCase( a ): __a = git.Repo(search_parent_directories=a ) __a = { "repo_id": str(a ), "repo_sha": str(repo.head.object.hexsha ), "repo_branch": str(repo.active_branch ), } with open(os.path.join(a , "git_log.json" ) , "w" ) as f: json.dump(a , a , indent=4 ) def _lowerCamelCase( a ): if params.n_gpu <= 0: __a = 0 __a = -1 __a = True __a = False return assert torch.cuda.is_available() logger.info("Initializing GPUs" ) if params.n_gpu > 1: assert params.local_rank != -1 __a = int(os.environ["WORLD_SIZE"] ) __a = int(os.environ["N_GPU_NODE"] ) __a = int(os.environ["RANK"] ) # number of nodes / node ID __a = params.world_size // params.n_gpu_per_node __a = params.global_rank // params.n_gpu_per_node __a = True assert params.n_nodes == int(os.environ["N_NODES"] ) assert params.node_id == int(os.environ["NODE_RANK"] ) # local job (single GPU) else: assert params.local_rank == -1 __a = 1 __a = 0 __a = 0 __a = 0 __a = 1 __a = 1 __a = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode __a = params.node_id == 0 and params.local_rank == 0 __a = params.n_nodes > 1 # summary __a = F"--- Global rank: {params.global_rank} - " logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes ) logger.info(PREFIX + "Node ID : %i" % params.node_id ) logger.info(PREFIX + "Local rank : %i" % params.local_rank ) logger.info(PREFIX + "World size : %i" % params.world_size ) logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node ) logger.info(PREFIX + "Master : %s" % str(params.is_master ) ) logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node ) ) logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu ) ) logger.info(PREFIX + "Hostname : %s" % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info("Initializing PyTorch distributed" ) torch.distributed.init_process_group( init_method="env://" , backend="nccl" , ) def _lowerCamelCase( a ): np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
"""simple docstring""" import os def _lowerCamelCase( a = "input.txt" ): with open(os.path.join(os.path.dirname(a ) , a ) ) as input_file: __a = [ [int(a ) for element in line.split("," )] for line in input_file.readlines() ] __a = len(a ) __a = len(matrix[0] ) __a = [[-1 for _ in range(a )] for _ in range(a )] for i in range(a ): __a = matrix[i][0] for j in range(1 , a ): for i in range(a ): __a = minimal_path_sums[i][j - 1] + matrix[i][j] for i in range(1 , a ): __a = min( minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] ) for i in range(rows - 2 , -1 , -1 ): __a = min( minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] ) return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums ) if __name__ == "__main__": print(F'''{solution() = }''')
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available SCREAMING_SNAKE_CASE__:List[str] = {"""configuration_van""": ["""VAN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VanConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__:Optional[Any] = [ """VAN_PRETRAINED_MODEL_ARCHIVE_LIST""", """VanForImageClassification""", """VanModel""", """VanPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_van import ( VAN_PRETRAINED_MODEL_ARCHIVE_LIST, VanForImageClassification, VanModel, VanPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__:Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
"""simple docstring""" import torch from diffusers import DiffusionPipeline class snake_case__ ( snake_case_ ): def __init__( self , lowerCamelCase , lowerCamelCase ): super().__init__() self.register_modules(unet=lowerCamelCase , scheduler=lowerCamelCase ) def __call__( self ): __a = torch.randn( (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , ) __a = 1 __a = self.unet(lowerCamelCase , lowerCamelCase ).sample __a = self.scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase ).prev_sample __a = scheduler_output - scheduler_output + torch.ones_like(lowerCamelCase ) return result
"""simple docstring""" from __future__ import annotations from typing import Any class snake_case__ : def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = 0 ): __a , __a = row, column __a = [[default_value for c in range(lowerCamelCase )] for r in range(lowerCamelCase )] def __str__( self ): __a = F"Matrix consist of {self.row} rows and {self.column} columns\n" # Make string identifier __a = 0 for row_vector in self.array: for obj in row_vector: __a = max(lowerCamelCase , len(str(lowerCamelCase ) ) ) __a = F"%{max_element_length}s" # Make string and return def single_line(lowerCamelCase ) -> str: nonlocal string_format_identifier __a = "[" line += ", ".join(string_format_identifier % (obj,) for obj in row_vector ) line += "]" return line s += "\n".join(single_line(lowerCamelCase ) for row_vector in self.array ) return s def __repr__( self ): return str(self ) def a__ ( self , lowerCamelCase ): if not (isinstance(lowerCamelCase , (list, tuple) ) and len(lowerCamelCase ) == 2): return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False else: return True def __getitem__( self , lowerCamelCase ): assert self.validate_indicies(lowerCamelCase ) return self.array[loc[0]][loc[1]] def __setitem__( self , lowerCamelCase , lowerCamelCase ): assert self.validate_indicies(lowerCamelCase ) __a = value def __add__( self , lowerCamelCase ): assert isinstance(lowerCamelCase , lowerCamelCase ) assert self.row == another.row and self.column == another.column # Add __a = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __a = self[r, c] + another[r, c] return result def __neg__( self ): __a = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __a = -self[r, c] return result def __sub__( self , lowerCamelCase ): return self + (-another) def __mul__( self , lowerCamelCase ): if isinstance(lowerCamelCase , (int, float) ): # Scalar multiplication __a = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __a = self[r, c] * another return result elif isinstance(lowerCamelCase , lowerCamelCase ): # Matrix multiplication assert self.column == another.row __a = Matrix(self.row , another.column ) for r in range(self.row ): for c in range(another.column ): for i in range(self.column ): result[r, c] += self[r, i] * another[i, c] return result else: __a = F"Unsupported type given for another ({type(lowerCamelCase )})" raise TypeError(lowerCamelCase ) def a__ ( self ): __a = Matrix(self.column , self.row ) for r in range(self.row ): for c in range(self.column ): __a = self[r, c] return result def a__ ( self , lowerCamelCase , lowerCamelCase ): assert isinstance(lowerCamelCase , lowerCamelCase ) and isinstance(lowerCamelCase , lowerCamelCase ) assert self.row == self.column == u.row == v.row # u, v should be column vector assert u.column == v.column == 1 # u, v should be column vector # Calculate __a = v.transpose() __a = (v_t * self * u)[0, 0] + 1 if numerator_factor == 0: return None # It's not invertable return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor)) # Testing if __name__ == "__main__": def _lowerCamelCase( ): # a^(-1) __a = Matrix(3 , 3 , 0 ) for i in range(3 ): __a = 1 print(F"a^(-1) is {ainv}" ) # u, v __a = Matrix(3 , 1 , 0 ) __a , __a , __a = 1, 2, -3 __a = Matrix(3 , 1 , 0 ) __a , __a , __a = 4, -2, 5 print(F"u is {u}" ) print(F"v is {v}" ) print(F"uv^T is {u * v.transpose()}" ) # Sherman Morrison print(F"(a + uv^T)^(-1) is 
{ainv.sherman_morrison(a , a )}" ) def _lowerCamelCase( ): import doctest doctest.testmod() testa()
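For reference, the identity the `sherman_morrison` method encodes, with the matrix instance holding $A^{-1}$:

$(A + uv^{T})^{-1} = A^{-1} - \frac{A^{-1} u \, v^{T} A^{-1}}{1 + v^{T} A^{-1} u}$

which is defined whenever $1 + v^{T} A^{-1} u \neq 0$; the method returns None in exactly that degenerate case.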
"""simple docstring""" import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class snake_case__ : def __init__( self , lowerCamelCase , lowerCamelCase = 13 , lowerCamelCase = 64 , lowerCamelCase = 2 , lowerCamelCase = 3 , lowerCamelCase = 3 , lowerCamelCase = True , lowerCamelCase = True , lowerCamelCase = 128 , lowerCamelCase=[16, 32, 64, 128] , lowerCamelCase = 7 , lowerCamelCase = 4 , lowerCamelCase = 37 , lowerCamelCase = "gelu" , lowerCamelCase = 0.1 , lowerCamelCase = 0.1 , lowerCamelCase = 10 , lowerCamelCase = 0.02 , lowerCamelCase = 2 , lowerCamelCase = 1 , lowerCamelCase = 128 , lowerCamelCase = [2, 2, 2, 2] , lowerCamelCase = 2 , lowerCamelCase = 2 , ): __a = parent __a = batch_size __a = image_size __a = patch_size __a = num_channels __a = is_training __a = use_labels __a = hidden_size __a = num_hidden_layers __a = num_attention_heads __a = intermediate_size __a = hidden_act __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = type_sequence_label_size __a = initializer_range __a = encoder_stride __a = num_attention_outputs __a = embed_dim __a = embed_dim + 1 __a = resolution __a = depths __a = hidden_sizes __a = dim __a = mlp_expansion_ratio def a__ ( self ): __a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __a = None if self.use_labels: __a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __a = self.get_config() return config, pixel_values, labels def a__ ( self ): return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): __a = TFEfficientFormerModel(config=lowerCamelCase ) __a = model(lowerCamelCase , training=lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): __a = self.type_sequence_label_size __a = TFEfficientFormerForImageClassification(lowerCamelCase ) __a = model(lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __a = 1 __a = TFEfficientFormerForImageClassification(lowerCamelCase ) __a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __a = model(lowerCamelCase , labels=lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def a__ ( self ): __a = self.prepare_config_and_inputs() __a , __a , __a = config_and_inputs __a = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class snake_case__ ( snake_case_, snake_case_, unittest.TestCase ): _snake_case : Tuple = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) _snake_case : int = ( { """feature-extraction""": TFEfficientFormerModel, """image-classification""": ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) _snake_case : List[Any] = False _snake_case : Tuple = False _snake_case : Optional[int] = False _snake_case : List[Any] = False _snake_case : Tuple = False def a__ ( self ): __a = TFEfficientFormerModelTester(self ) __a = ConfigTester( self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase , hidden_size=37 ) def a__ ( self ): self.config_tester.run_common_tests() @unittest.skip(reason="EfficientFormer does not use inputs_embeds" ) def a__ ( self ): pass @unittest.skip(reason="EfficientFormer does not support input and output embeddings" ) def a__ ( self ): pass def a__ ( self ): __a , __a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a = model_class(lowerCamelCase ) __a = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __a = [*signature.parameters.keys()] __a = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCamelCase ) def a__ ( self ): def check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase ): __a = model_class(lowerCamelCase ) __a = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) , training=lowerCamelCase ) __a = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __a = getattr( self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(lowerCamelCase ) , lowerCamelCase ) if hasattr(self.model_tester , "encoder_seq_length" ): __a = self.model_tester.encoder_seq_length if hasattr(self.model_tester , "chunk_length" ) and self.model_tester.chunk_length > 1: __a = seq_length * self.model_tester.chunk_length else: __a = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: __a = outputs.decoder_hidden_states self.assertIsInstance(lowerCamelCase , (list, tuple) ) self.assertEqual(len(lowerCamelCase ) , lowerCamelCase ) __a = getattr(self.model_tester , "seq_length" , lowerCamelCase ) __a = getattr(self.model_tester , "decoder_seq_length" , lowerCamelCase ) self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , ) __a , __a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a = True check_hidden_states_output(lowerCamelCase , lowerCamelCase ,
lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __a = True check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=False ): __a = super()._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase ) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def a__ ( self ): __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase ) @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" ) def a__ ( self ): __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase ) def a__ ( self ): __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase ) @slow def a__ ( self ): for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a = TFEfficientFormerModel.from_pretrained(lowerCamelCase ) self.assertIsNotNone(lowerCamelCase ) def a__ ( self ): __a , __a = self.model_tester.prepare_config_and_inputs_for_common() __a = True __a = getattr(self.model_tester , "seq_length" , lowerCamelCase ) __a = getattr(self.model_tester , "encoder_seq_length" , lowerCamelCase ) __a = getattr(self.model_tester , "key_length" , lowerCamelCase ) __a = getattr(self.model_tester , "chunk_length" , lowerCamelCase ) if chunk_length is not None and hasattr(self.model_tester , "num_hashes" ): __a = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: __a = True __a = False __a = True __a = model_class(lowerCamelCase ) __a = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) , training=lowerCamelCase ) __a = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(lowerCamelCase ) , self.model_tester.num_attention_outputs ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __a = True __a = model_class(lowerCamelCase ) __a = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) , training=lowerCamelCase ) __a = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(lowerCamelCase ) , self.model_tester.num_attention_outputs ) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def a__ ( self ): # We use a simplified version of this test for EfficientFormer because it requires training=False # and Keras refuses to let us force that during functional construction __a , __a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model __a = model_class(lowerCamelCase ) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes __a = { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=lowerCamelCase ) for key, val in model.input_signature.items() if key in model.dummy_inputs } __a = model(lowerCamelCase ) 
self.assertTrue(outputs_dict is not None ) def _lowerCamelCase( ): __a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class snake_case__ ( unittest.TestCase ): @cached_property def a__ ( self ): return ( EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300" ) if is_vision_available() else None ) @slow def a__ ( self ): __a = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300" ) __a = self.default_image_processor __a = prepare_img() __a = image_processor(images=lowerCamelCase , return_tensors="tf" ) # forward pass __a = model(**lowerCamelCase , training=lowerCamelCase ) # verify the logits __a = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , lowerCamelCase ) __a = tf.constant([-0.0555, 0.4825, -0.0852] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1E-4 ) ) @slow def a__ ( self ): __a = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( "snap-research/efficientformer-l1-300" ) __a = self.default_image_processor __a = prepare_img() __a = image_processor(images=lowerCamelCase , return_tensors="tf" ) # forward pass __a = model(**lowerCamelCase , training=lowerCamelCase ) # verify the logits __a = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , lowerCamelCase ) __a = tf.constant([-0.1312, 0.4353, -1.0499] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1E-4 ) )
"""simple docstring""" import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def _lowerCamelCase( a , a , a , a , a=True , a="pt" ): __a = {"add_prefix_space": True} if isinstance(a , a ) and not line.startswith(" " ) else {} __a = padding_side return tokenizer( [line] , max_length=a , padding="max_length" if pad_to_max_length else None , truncation=a , return_tensors=a , add_special_tokens=a , **a , ) def _lowerCamelCase( a , a , a=None , ): __a = input_ids.ne(a ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class snake_case__ ( snake_case_ ): def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase="train" , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="" , ): super().__init__() __a = Path(lowerCamelCase ).joinpath(type_path + ".source" ) __a = Path(lowerCamelCase ).joinpath(type_path + ".target" ) __a = self.get_char_lens(self.src_file ) __a = max_source_length __a = max_target_length assert min(self.src_lens ) > 0, F"found empty line in {self.src_file}" __a = tokenizer __a = prefix if n_obs is not None: __a = self.src_lens[:n_obs] __a = src_lang __a = tgt_lang def __len__( self ): return len(self.src_lens ) def __getitem__( self , lowerCamelCase ): __a = index + 1 # linecache starts at 1 __a = self.prefix + linecache.getline(str(self.src_file ) , lowerCamelCase ).rstrip("\n" ) __a = linecache.getline(str(self.tgt_file ) , lowerCamelCase ).rstrip("\n" ) assert source_line, F"empty source line for index {index}" assert tgt_line, F"empty tgt line for index {index}" # Need to add eos token manually for T5 if isinstance(self.tokenizer , lowerCamelCase ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right __a = ( self.tokenizer.question_encoder if isinstance(self.tokenizer , lowerCamelCase ) else self.tokenizer ) __a = self.tokenizer.generator if isinstance(self.tokenizer , lowerCamelCase ) else self.tokenizer __a = encode_line(lowerCamelCase , lowerCamelCase , self.max_source_length , "right" ) __a = encode_line(lowerCamelCase , lowerCamelCase , self.max_target_length , "right" ) __a = source_inputs["input_ids"].squeeze() __a = target_inputs["input_ids"].squeeze() __a = source_inputs["attention_mask"].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def a__ ( lowerCamelCase ): return [len(lowerCamelCase ) for x in Path(lowerCamelCase ).open().readlines()] def a__ ( self , lowerCamelCase ): __a = torch.stack([x["input_ids"] for x in batch] ) __a = torch.stack([x["attention_mask"] for x in batch] ) __a = torch.stack([x["decoder_input_ids"] for x in batch] ) __a = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer , lowerCamelCase ) else self.tokenizer.pad_token_id ) __a = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer , lowerCamelCase ) else self.tokenizer.pad_token_id ) __a = trim_batch(lowerCamelCase , lowerCamelCase ) __a , __a = trim_batch(lowerCamelCase , lowerCamelCase , attention_mask=lowerCamelCase ) 
__a = { "input_ids": source_ids, "attention_mask": source_mask, "decoder_input_ids": y, } return batch SCREAMING_SNAKE_CASE__:Tuple = getLogger(__name__) def _lowerCamelCase( a ): return list(itertools.chain.from_iterable(a ) ) def _lowerCamelCase( a ): __a = get_git_info() save_json(a , os.path.join(a , "git_log.json" ) ) def _lowerCamelCase( a , a , a=4 , **a ): with open(a , "w" ) as f: json.dump(a , a , indent=a , **a ) def _lowerCamelCase( a ): with open(a ) as f: return json.load(a ) def _lowerCamelCase( ): __a = git.Repo(search_parent_directories=a ) __a = { "repo_id": str(a ), "repo_sha": str(repo.head.object.hexsha ), "repo_branch": str(repo.active_branch ), "hostname": str(socket.gethostname() ), } return repo_infos def _lowerCamelCase( a , a ): return list(map(a , a ) ) def _lowerCamelCase( a , a ): with open(a , "wb" ) as f: return pickle.dump(a , a ) def _lowerCamelCase( a ): def remove_articles(a ): return re.sub(R"\b(a|an|the)\b" , " " , a ) def white_space_fix(a ): return " ".join(text.split() ) def remove_punc(a ): __a = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(a ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(a ) ) ) ) def _lowerCamelCase( a , a ): __a = normalize_answer(a ).split() __a = normalize_answer(a ).split() __a = Counter(a ) & Counter(a ) __a = sum(common.values() ) if num_same == 0: return 0 __a = 1.0 * num_same / len(a ) __a = 1.0 * num_same / len(a ) __a = (2 * precision * recall) / (precision + recall) return fa def _lowerCamelCase( a , a ): return normalize_answer(a ) == normalize_answer(a ) def _lowerCamelCase( a , a ): assert len(a ) == len(a ) __a = 0 for hypo, pred in zip(a , a ): em += exact_match_score(a , a ) if len(a ) > 0: em /= len(a ) return {"em": em} def _lowerCamelCase( a ): return model_prefix.startswith("rag" ) def _lowerCamelCase( a , a , a ): __a = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead __a = "dropout_rate" for p in extra_params: if getattr(a , a , a ): if not hasattr(a , a ) and not hasattr(a , equivalent_param[p] ): logger.info("config doesn't have a `{}` attribute".format(a ) ) delattr(a , a ) continue __a = p if hasattr(a , a ) else equivalent_param[p] setattr(a , a , getattr(a , a ) ) delattr(a , a ) return hparams, config
"""simple docstring""" import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html SCREAMING_SNAKE_CASE__:Tuple = """platform""" import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class snake_case__ : _snake_case : int = PegasusConfig _snake_case : List[str] = {} _snake_case : Optional[int] = """gelu""" def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=20 , lowerCamelCase=2 , lowerCamelCase=1 , lowerCamelCase=0 , ): __a = parent __a = batch_size __a = seq_length __a = is_training __a = use_labels __a = vocab_size __a = hidden_size __a = num_hidden_layers __a = num_attention_heads __a = intermediate_size __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = max_position_embeddings __a = eos_token_id __a = pad_token_id __a = bos_token_id def a__ ( self ): __a = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) __a = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) __a = np.concatenate([input_ids, eos_tensor] , axis=1 ) __a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __a = prepare_pegasus_inputs_dict(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return config, inputs_dict def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): __a = 20 __a = model_class_name(lowerCamelCase ) __a = model.encode(inputs_dict["input_ids"] ) __a , __a = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) __a = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase , lowerCamelCase ) __a = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" ) __a = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __a = model.decode( decoder_input_ids[:, :-1] , lowerCamelCase , decoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , decoder_position_ids=lowerCamelCase , ) __a = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" ) __a = model.decode( 
decoder_input_ids[:, -1:] , lowerCamelCase , decoder_attention_mask=lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCamelCase , ) __a = model.decode(lowerCamelCase , lowerCamelCase ) __a = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"Max diff is {diff}" ) def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): __a = 20 __a = model_class_name(lowerCamelCase ) __a = model.encode(inputs_dict["input_ids"] ) __a , __a = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) __a = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) __a = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase , lowerCamelCase ) __a = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __a = model.decode( decoder_input_ids[:, :-1] , lowerCamelCase , decoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , decoder_position_ids=lowerCamelCase , ) __a = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" ) __a = model.decode( decoder_input_ids[:, -1:] , lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCamelCase , decoder_position_ids=lowerCamelCase , ) __a = model.decode(lowerCamelCase , lowerCamelCase , decoder_attention_mask=lowerCamelCase ) __a = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"Max diff is {diff}" ) def _lowerCamelCase( a , a , a , a=None , a=None , ): if attention_mask is None: __a = np.not_equal(a , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: __a = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class snake_case__ ( snake_case_, unittest.TestCase ): _snake_case : str = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) _snake_case : Optional[Any] = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () _snake_case : str = True _snake_case : Tuple = False _snake_case : List[Any] = False _snake_case : Union[str, Any] = False def a__ ( self ): __a = FlaxPegasusModelTester(self ) __a = ConfigTester(self , config_class=lowerCamelCase ) def a__ ( self ): self.config_tester.run_common_tests() def a__ ( self ): __a , __a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(lowerCamelCase , lowerCamelCase , lowerCamelCase ) def a__ ( self ): __a , __a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(lowerCamelCase , lowerCamelCase , lowerCamelCase ) def a__ ( self ): __a , __a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __a = self._prepare_for_class(lowerCamelCase , lowerCamelCase ) __a = model_class(lowerCamelCase ) 
@jax.jit def encode_jitted(lowerCamelCase , lowerCamelCase=None , **lowerCamelCase ): return model.encode(input_ids=lowerCamelCase , attention_mask=lowerCamelCase ) with self.subTest("JIT Enabled" ): __a = encode_jitted(**lowerCamelCase ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): __a = encode_jitted(**lowerCamelCase ).to_tuple() self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) ) for jitted_output, output in zip(lowerCamelCase , lowerCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) def a__ ( self ): __a , __a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __a = model_class(lowerCamelCase ) __a = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] ) __a = { "decoder_input_ids": inputs_dict["decoder_input_ids"], "decoder_attention_mask": inputs_dict["decoder_attention_mask"], "encoder_outputs": encoder_outputs, } @jax.jit def decode_jitted(lowerCamelCase , lowerCamelCase , lowerCamelCase ): return model.decode( decoder_input_ids=lowerCamelCase , decoder_attention_mask=lowerCamelCase , encoder_outputs=lowerCamelCase , ) with self.subTest("JIT Enabled" ): __a = decode_jitted(**lowerCamelCase ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): __a = decode_jitted(**lowerCamelCase ).to_tuple() self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) ) for jitted_output, output in zip(lowerCamelCase , lowerCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def a__ ( self ): for model_class_name in self.all_model_classes: __a = model_class_name.from_pretrained("google/pegasus-large" , from_pt=lowerCamelCase ) __a = np.ones((1, 1) ) __a = model(lowerCamelCase ) self.assertIsNotNone(lowerCamelCase ) @slow def a__ ( self ): __a = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" ) __a = PegasusTokenizer.from_pretrained("google/pegasus-xsum" ) __a = [ " PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.", " The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. 
And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ", ] __a = [ "California's largest electricity provider has turned off power to hundreds of thousands of customers.", "Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.", ] __a = tokenizer(lowerCamelCase , return_tensors="np" , truncation=lowerCamelCase , max_length=512 , padding=lowerCamelCase ) __a = model.generate(**lowerCamelCase , num_beams=2 ).sequences __a = tokenizer.batch_decode(lowerCamelCase , skip_special_tokens=lowerCamelCase ) assert tgt_text == decoded
code_codestyle: 261
"""simple docstring""" from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .attention_processor import AttentionProcessor, AttnProcessor from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder @dataclass class snake_case__ ( snake_case_ ): _snake_case : "DiagonalGaussianDistribution" class snake_case__ ( snake_case_, snake_case_ ): _snake_case : Optional[Any] = True @register_to_config def __init__( self , lowerCamelCase = 3 , lowerCamelCase = 3 , lowerCamelCase = ("DownEncoderBlock2D",) , lowerCamelCase = ("UpDecoderBlock2D",) , lowerCamelCase = (64,) , lowerCamelCase = 1 , lowerCamelCase = "silu" , lowerCamelCase = 4 , lowerCamelCase = 32 , lowerCamelCase = 32 , lowerCamelCase = 0.1_8215 , ): super().__init__() # pass init params to Encoder __a = Encoder( in_channels=lowerCamelCase , out_channels=lowerCamelCase , down_block_types=lowerCamelCase , block_out_channels=lowerCamelCase , layers_per_block=lowerCamelCase , act_fn=lowerCamelCase , norm_num_groups=lowerCamelCase , double_z=lowerCamelCase , ) # pass init params to Decoder __a = Decoder( in_channels=lowerCamelCase , out_channels=lowerCamelCase , up_block_types=lowerCamelCase , block_out_channels=lowerCamelCase , layers_per_block=lowerCamelCase , norm_num_groups=lowerCamelCase , act_fn=lowerCamelCase , ) __a = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 ) __a = nn.Convad(lowerCamelCase , lowerCamelCase , 1 ) __a = False __a = False # only relevant if vae tiling is enabled __a = self.config.sample_size __a = ( self.config.sample_size[0] if isinstance(self.config.sample_size , (list, tuple) ) else self.config.sample_size ) __a = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) ) __a = 0.25 def a__ ( self , lowerCamelCase , lowerCamelCase=False ): if isinstance(lowerCamelCase , (Encoder, Decoder) ): __a = value def a__ ( self , lowerCamelCase = True ): __a = use_tiling def a__ ( self ): self.enable_tiling(lowerCamelCase ) def a__ ( self ): __a = True def a__ ( self ): __a = False @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def a__ ( self ): __a = {} def fn_recursive_add_processors(lowerCamelCase , lowerCamelCase , lowerCamelCase ): if hasattr(lowerCamelCase , "set_processor" ): __a = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(F"{name}.{sub_name}" , lowerCamelCase , lowerCamelCase ) return processors for name, module in self.named_children(): fn_recursive_add_processors(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return processors def a__ ( self , lowerCamelCase ): __a = len(self.attn_processors.keys() ) if isinstance(lowerCamelCase , lowerCamelCase ) and len(lowerCamelCase ) != count: raise ValueError( F"A dict of processors was passed, but the number of processors {len(lowerCamelCase )} does not match the" F" number of attention layers: {count}. Please make sure to pass {count} processor classes." 
) def fn_recursive_attn_processor(lowerCamelCase , lowerCamelCase , lowerCamelCase ): if hasattr(lowerCamelCase , "set_processor" ): if not isinstance(lowerCamelCase , lowerCamelCase ): module.set_processor(lowerCamelCase ) else: module.set_processor(processor.pop(F"{name}.processor" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(F"{name}.{sub_name}" , lowerCamelCase , lowerCamelCase ) for name, module in self.named_children(): fn_recursive_attn_processor(lowerCamelCase , lowerCamelCase , lowerCamelCase ) def a__ ( self ): self.set_attn_processor(AttnProcessor() ) @apply_forward_hook def a__ ( self , lowerCamelCase , lowerCamelCase = True ): if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): return self.tiled_encode(lowerCamelCase , return_dict=lowerCamelCase ) if self.use_slicing and x.shape[0] > 1: __a = [self.encoder(lowerCamelCase ) for x_slice in x.split(1 )] __a = torch.cat(lowerCamelCase ) else: __a = self.encoder(lowerCamelCase ) __a = self.quant_conv(lowerCamelCase ) __a = DiagonalGaussianDistribution(lowerCamelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase = True ): if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): return self.tiled_decode(lowerCamelCase , return_dict=lowerCamelCase ) __a = self.post_quant_conv(lowerCamelCase ) __a = self.decoder(lowerCamelCase ) if not return_dict: return (dec,) return DecoderOutput(sample=lowerCamelCase ) @apply_forward_hook def a__ ( self , lowerCamelCase , lowerCamelCase = True ): if self.use_slicing and z.shape[0] > 1: __a = [self._decode(lowerCamelCase ).sample for z_slice in z.split(1 )] __a = torch.cat(lowerCamelCase ) else: __a = self._decode(lowerCamelCase ).sample if not return_dict: return (decoded,) return DecoderOutput(sample=lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): __a = min(a.shape[2] , b.shape[2] , lowerCamelCase ) for y in range(lowerCamelCase ): __a = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): __a = min(a.shape[3] , b.shape[3] , lowerCamelCase ) for x in range(lowerCamelCase ): __a = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def a__ ( self , lowerCamelCase , lowerCamelCase = True ): __a = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) ) __a = int(self.tile_latent_min_size * self.tile_overlap_factor ) __a = self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and encode them separately. 
__a = [] for i in range(0 , x.shape[2] , lowerCamelCase ): __a = [] for j in range(0 , x.shape[3] , lowerCamelCase ): __a = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] __a = self.encoder(lowerCamelCase ) __a = self.quant_conv(lowerCamelCase ) row.append(lowerCamelCase ) rows.append(lowerCamelCase ) __a = [] for i, row in enumerate(lowerCamelCase ): __a = [] for j, tile in enumerate(lowerCamelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __a = self.blend_v(rows[i - 1][j] , lowerCamelCase , lowerCamelCase ) if j > 0: __a = self.blend_h(row[j - 1] , lowerCamelCase , lowerCamelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(lowerCamelCase , dim=3 ) ) __a = torch.cat(lowerCamelCase , dim=2 ) __a = DiagonalGaussianDistribution(lowerCamelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase = True ): __a = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) ) __a = int(self.tile_sample_min_size * self.tile_overlap_factor ) __a = self.tile_sample_min_size - blend_extent # Split z into overlapping 64x64 tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. __a = [] for i in range(0 , z.shape[2] , lowerCamelCase ): __a = [] for j in range(0 , z.shape[3] , lowerCamelCase ): __a = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] __a = self.post_quant_conv(lowerCamelCase ) __a = self.decoder(lowerCamelCase ) row.append(lowerCamelCase ) rows.append(lowerCamelCase ) __a = [] for i, row in enumerate(lowerCamelCase ): __a = [] for j, tile in enumerate(lowerCamelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __a = self.blend_v(rows[i - 1][j] , lowerCamelCase , lowerCamelCase ) if j > 0: __a = self.blend_h(row[j - 1] , lowerCamelCase , lowerCamelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(lowerCamelCase , dim=3 ) ) __a = torch.cat(lowerCamelCase , dim=2 ) if not return_dict: return (dec,) return DecoderOutput(sample=lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = True , lowerCamelCase = None , ): __a = sample __a = self.encode(lowerCamelCase ).latent_dist if sample_posterior: __a = posterior.sample(generator=lowerCamelCase ) else: __a = posterior.mode() __a = self.decode(lowerCamelCase ).sample if not return_dict: return (dec,) return DecoderOutput(sample=lowerCamelCase )
style_context_codestyle: 261
label: 1
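The `prepare_pegasus_inputs_dict` helper in the code field above derives both attention masks from the pad token; `np.inta` is the dump's mangling of an integer dtype. A readable sketch, assuming `np.int8` (the usual convention in these tests):

import numpy as np

def build_masks(input_ids, decoder_input_ids, pad_token_id):
    # encoder mask: 1 wherever the token is not padding
    attention_mask = np.not_equal(input_ids, pad_token_id).astype(np.int8)
    # decoder mask: position 0 (the start token) is always kept visible
    decoder_attention_mask = np.concatenate(
        [
            np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
            np.not_equal(decoder_input_ids[:, 1:], pad_token_id).astype(np.int8),
        ],
        axis=-1,
    )
    return attention_mask, decoder_attention_mask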
"""simple docstring""" import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE__:str = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__:Any = { """RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""", } class snake_case__ ( snake_case_ ): _snake_case : int = """mvp""" _snake_case : Any = ["""past_key_values"""] _snake_case : int = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self , lowerCamelCase=50267 , lowerCamelCase=1024 , lowerCamelCase=12 , lowerCamelCase=4096 , lowerCamelCase=16 , lowerCamelCase=12 , lowerCamelCase=4096 , lowerCamelCase=16 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase="gelu" , lowerCamelCase=1024 , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.02 , lowerCamelCase=0.0 , lowerCamelCase=False , lowerCamelCase=True , lowerCamelCase=1 , lowerCamelCase=0 , lowerCamelCase=2 , lowerCamelCase=True , lowerCamelCase=2 , lowerCamelCase=2 , lowerCamelCase=False , lowerCamelCase=100 , lowerCamelCase=800 , **lowerCamelCase , ): __a = vocab_size __a = max_position_embeddings __a = d_model __a = encoder_ffn_dim __a = encoder_layers __a = encoder_attention_heads __a = decoder_ffn_dim __a = decoder_layers __a = decoder_attention_heads __a = dropout __a = attention_dropout __a = activation_dropout __a = activation_function __a = init_std __a = encoder_layerdrop __a = decoder_layerdrop __a = classifier_dropout __a = use_cache __a = encoder_layers __a = scale_embedding # scale factor will be sqrt(d_model) if True __a = use_prompt __a = prompt_length __a = prompt_mid_dim super().__init__( pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , is_encoder_decoder=lowerCamelCase , decoder_start_token_id=lowerCamelCase , forced_eos_token_id=lowerCamelCase , **lowerCamelCase , ) if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , lowerCamelCase ): __a = self.bos_token_id warnings.warn( F"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. " "The config can simply be saved and uploaded again to be fixed." )
code_codestyle: 261
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy SCREAMING_SNAKE_CASE__:List[Any] = logging.get_logger(__name__) class snake_case__ ( snake_case_ ): def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ): __a = feature_size __a = sampling_rate __a = padding_value __a = kwargs.pop("padding_side" , "right" ) __a = kwargs.pop("return_attention_mask" , lowerCamelCase ) super().__init__(**lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , ): # If we have a list of dicts, let's convert it in a dict of lists # We do this to allow using this method as a collate_fn function in PyTorch Dataloader if isinstance(lowerCamelCase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ): __a = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`" F" to this method that includes {self.model_input_names[0]}, but you provided" F" {list(processed_features.keys() )}" ) __a = processed_features[self.model_input_names[0]] __a = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(lowerCamelCase ) == 0: if return_attention_mask: __a = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch __a = required_input[0] if isinstance(lowerCamelCase , (list, tuple) ): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. __a = 0 while len(required_input[index] ) == 0: index += 1 if index < len(lowerCamelCase ): __a = required_input[index][0] if return_tensors is None: if is_tf_tensor(lowerCamelCase ): __a = "tf" elif is_torch_tensor(lowerCamelCase ): __a = "pt" elif isinstance(lowerCamelCase , (int, float, list, tuple, np.ndarray) ): __a = "np" else: raise ValueError( F"type of {first_element} unknown: {type(lowerCamelCase )}. " "Should be one of a python, numpy, pytorch or tensorflow object." ) for key, value in processed_features.items(): if isinstance(value[0] , (int, float) ): __a = to_numpy(lowerCamelCase ) else: __a = [to_numpy(lowerCamelCase ) for v in value] # Convert padding_strategy in PaddingStrategy __a = self._get_padding_strategies(padding=lowerCamelCase , max_length=lowerCamelCase ) __a = processed_features[self.model_input_names[0]] __a = len(lowerCamelCase ) if not all(len(lowerCamelCase ) == batch_size for v in processed_features.values() ): raise ValueError("Some items in the output dictionary have a different batch size than others." 
) __a = [] for i in range(lowerCamelCase ): __a = {k: v[i] for k, v in processed_features.items()} # truncation __a = self._truncate( lowerCamelCase , max_length=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , truncation=lowerCamelCase , ) truncated_inputs.append(lowerCamelCase ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length __a = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) __a = PaddingStrategy.MAX_LENGTH __a = {} for i in range(lowerCamelCase ): # padding __a = self._pad( truncated_inputs[i] , max_length=lowerCamelCase , padding_strategy=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , ) for key, value in outputs.items(): if key not in batch_outputs: __a = [] if value.dtype is np.dtype(np.floataa ): __a = value.astype(np.floataa ) batch_outputs[key].append(lowerCamelCase ) return BatchFeature(lowerCamelCase , tensor_type=lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = PaddingStrategy.DO_NOT_PAD , lowerCamelCase = None , lowerCamelCase = None , ): __a = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: __a = len(lowerCamelCase ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): __a = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of __a = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowerCamelCase ) < max_length if return_attention_mask and "attention_mask" not in processed_features: __a = np.ones(len(lowerCamelCase ) , dtype=np.intaa ) if needs_to_be_padded: __a = max_length - len(lowerCamelCase ) if self.padding_side == "right": if return_attention_mask: __a = np.pad( processed_features["attention_mask"] , (0, difference) ) __a = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) __a = np.pad( lowerCamelCase , lowerCamelCase , "constant" , constant_values=self.padding_value ) elif self.padding_side == "left": if return_attention_mask: __a = np.pad( processed_features["attention_mask"] , (difference, 0) ) __a = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) __a = np.pad( lowerCamelCase , lowerCamelCase , "constant" , constant_values=self.padding_value ) else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return processed_features def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , ): if not truncation: return processed_features elif truncation and max_length is None: raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." 
) __a = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): __a = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of __a = len(lowerCamelCase ) > max_length if needs_to_be_truncated: __a = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: __a = processed_features["attention_mask"][:max_length] return processed_features def a__ ( self , lowerCamelCase=False , lowerCamelCase=None ): # Get padding strategy if padding is not False: if padding is True: __a = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(lowerCamelCase , lowerCamelCase ): __a = PaddingStrategy(lowerCamelCase ) elif isinstance(lowerCamelCase , lowerCamelCase ): __a = padding else: __a = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( F"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined" ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use" " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." ) return padding_strategy
style_context_codestyle: 261
label: 1
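The `_pad` method in the style_context above implements right-/left-padding of feature sequences with an optional attention mask. A stripped-down sketch of just the right-padding branch for a 1-D sequence (function name and defaults are assumptions):

import numpy as np

def pad_right(values, max_length, padding_value=0.0):
    # the mask marks real (unpadded) positions with 1
    attention_mask = np.ones(len(values), dtype=np.int32)
    difference = max_length - len(values)
    if difference <= 0:
        return np.asarray(values), attention_mask
    attention_mask = np.pad(attention_mask, (0, difference))
    padded = np.pad(values, (0, difference), "constant", constant_values=padding_value)
    return padded, attention_mask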
"""simple docstring""" from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .attention_processor import AttentionProcessor, AttnProcessor from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder @dataclass class snake_case__ ( snake_case_ ): _snake_case : "DiagonalGaussianDistribution" class snake_case__ ( snake_case_, snake_case_ ): _snake_case : Optional[Any] = True @register_to_config def __init__( self , lowerCamelCase = 3 , lowerCamelCase = 3 , lowerCamelCase = ("DownEncoderBlock2D",) , lowerCamelCase = ("UpDecoderBlock2D",) , lowerCamelCase = (64,) , lowerCamelCase = 1 , lowerCamelCase = "silu" , lowerCamelCase = 4 , lowerCamelCase = 32 , lowerCamelCase = 32 , lowerCamelCase = 0.1_8215 , ): super().__init__() # pass init params to Encoder __a = Encoder( in_channels=lowerCamelCase , out_channels=lowerCamelCase , down_block_types=lowerCamelCase , block_out_channels=lowerCamelCase , layers_per_block=lowerCamelCase , act_fn=lowerCamelCase , norm_num_groups=lowerCamelCase , double_z=lowerCamelCase , ) # pass init params to Decoder __a = Decoder( in_channels=lowerCamelCase , out_channels=lowerCamelCase , up_block_types=lowerCamelCase , block_out_channels=lowerCamelCase , layers_per_block=lowerCamelCase , norm_num_groups=lowerCamelCase , act_fn=lowerCamelCase , ) __a = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 ) __a = nn.Convad(lowerCamelCase , lowerCamelCase , 1 ) __a = False __a = False # only relevant if vae tiling is enabled __a = self.config.sample_size __a = ( self.config.sample_size[0] if isinstance(self.config.sample_size , (list, tuple) ) else self.config.sample_size ) __a = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) ) __a = 0.25 def a__ ( self , lowerCamelCase , lowerCamelCase=False ): if isinstance(lowerCamelCase , (Encoder, Decoder) ): __a = value def a__ ( self , lowerCamelCase = True ): __a = use_tiling def a__ ( self ): self.enable_tiling(lowerCamelCase ) def a__ ( self ): __a = True def a__ ( self ): __a = False @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def a__ ( self ): __a = {} def fn_recursive_add_processors(lowerCamelCase , lowerCamelCase , lowerCamelCase ): if hasattr(lowerCamelCase , "set_processor" ): __a = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(F"{name}.{sub_name}" , lowerCamelCase , lowerCamelCase ) return processors for name, module in self.named_children(): fn_recursive_add_processors(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return processors def a__ ( self , lowerCamelCase ): __a = len(self.attn_processors.keys() ) if isinstance(lowerCamelCase , lowerCamelCase ) and len(lowerCamelCase ) != count: raise ValueError( F"A dict of processors was passed, but the number of processors {len(lowerCamelCase )} does not match the" F" number of attention layers: {count}. Please make sure to pass {count} processor classes." 
) def fn_recursive_attn_processor(lowerCamelCase , lowerCamelCase , lowerCamelCase ): if hasattr(lowerCamelCase , "set_processor" ): if not isinstance(lowerCamelCase , lowerCamelCase ): module.set_processor(lowerCamelCase ) else: module.set_processor(processor.pop(F"{name}.processor" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(F"{name}.{sub_name}" , lowerCamelCase , lowerCamelCase ) for name, module in self.named_children(): fn_recursive_attn_processor(lowerCamelCase , lowerCamelCase , lowerCamelCase ) def a__ ( self ): self.set_attn_processor(AttnProcessor() ) @apply_forward_hook def a__ ( self , lowerCamelCase , lowerCamelCase = True ): if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): return self.tiled_encode(lowerCamelCase , return_dict=lowerCamelCase ) if self.use_slicing and x.shape[0] > 1: __a = [self.encoder(lowerCamelCase ) for x_slice in x.split(1 )] __a = torch.cat(lowerCamelCase ) else: __a = self.encoder(lowerCamelCase ) __a = self.quant_conv(lowerCamelCase ) __a = DiagonalGaussianDistribution(lowerCamelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase = True ): if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): return self.tiled_decode(lowerCamelCase , return_dict=lowerCamelCase ) __a = self.post_quant_conv(lowerCamelCase ) __a = self.decoder(lowerCamelCase ) if not return_dict: return (dec,) return DecoderOutput(sample=lowerCamelCase ) @apply_forward_hook def a__ ( self , lowerCamelCase , lowerCamelCase = True ): if self.use_slicing and z.shape[0] > 1: __a = [self._decode(lowerCamelCase ).sample for z_slice in z.split(1 )] __a = torch.cat(lowerCamelCase ) else: __a = self._decode(lowerCamelCase ).sample if not return_dict: return (decoded,) return DecoderOutput(sample=lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): __a = min(a.shape[2] , b.shape[2] , lowerCamelCase ) for y in range(lowerCamelCase ): __a = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): __a = min(a.shape[3] , b.shape[3] , lowerCamelCase ) for x in range(lowerCamelCase ): __a = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def a__ ( self , lowerCamelCase , lowerCamelCase = True ): __a = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) ) __a = int(self.tile_latent_min_size * self.tile_overlap_factor ) __a = self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and encode them separately. 
__a = [] for i in range(0 , x.shape[2] , lowerCamelCase ): __a = [] for j in range(0 , x.shape[3] , lowerCamelCase ): __a = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] __a = self.encoder(lowerCamelCase ) __a = self.quant_conv(lowerCamelCase ) row.append(lowerCamelCase ) rows.append(lowerCamelCase ) __a = [] for i, row in enumerate(lowerCamelCase ): __a = [] for j, tile in enumerate(lowerCamelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __a = self.blend_v(rows[i - 1][j] , lowerCamelCase , lowerCamelCase ) if j > 0: __a = self.blend_h(row[j - 1] , lowerCamelCase , lowerCamelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(lowerCamelCase , dim=3 ) ) __a = torch.cat(lowerCamelCase , dim=2 ) __a = DiagonalGaussianDistribution(lowerCamelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase = True ): __a = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) ) __a = int(self.tile_sample_min_size * self.tile_overlap_factor ) __a = self.tile_sample_min_size - blend_extent # Split z into overlapping 64x64 tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. __a = [] for i in range(0 , z.shape[2] , lowerCamelCase ): __a = [] for j in range(0 , z.shape[3] , lowerCamelCase ): __a = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] __a = self.post_quant_conv(lowerCamelCase ) __a = self.decoder(lowerCamelCase ) row.append(lowerCamelCase ) rows.append(lowerCamelCase ) __a = [] for i, row in enumerate(lowerCamelCase ): __a = [] for j, tile in enumerate(lowerCamelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __a = self.blend_v(rows[i - 1][j] , lowerCamelCase , lowerCamelCase ) if j > 0: __a = self.blend_h(row[j - 1] , lowerCamelCase , lowerCamelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(lowerCamelCase , dim=3 ) ) __a = torch.cat(lowerCamelCase , dim=2 ) if not return_dict: return (dec,) return DecoderOutput(sample=lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = True , lowerCamelCase = None , ): __a = sample __a = self.encode(lowerCamelCase ).latent_dist if sample_posterior: __a = posterior.sample(generator=lowerCamelCase ) else: __a = posterior.mode() __a = self.decode(lowerCamelCase ).sample if not return_dict: return (dec,) return DecoderOutput(sample=lowerCamelCase )
code_codestyle: 261
"""simple docstring""" from collections import Counter from timeit import timeit def _lowerCamelCase( a = "" , ): return sum(c % 2 for c in Counter(input_str.replace(" " , "" ).lower() ).values() ) < 2 def _lowerCamelCase( a = "" ): if len(a ) == 0: return True __a = input_str.replace(" " , "" ).lower() # character_freq_dict: Stores the frequency of every character in the input string __a = {} for character in lower_case_input_str: __a = character_freq_dict.get(a , 0 ) + 1 __a = 0 for character_count in character_freq_dict.values(): if character_count % 2: odd_char += 1 if odd_char > 1: return False return True def _lowerCamelCase( a = "" ): print("\nFor string = " , a , ":" ) print( "> can_string_be_rearranged_as_palindrome_counter()" , "\tans =" , can_string_be_rearranged_as_palindrome_counter(a ) , "\ttime =" , timeit( "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , ) print( "> can_string_be_rearranged_as_palindrome()" , "\tans =" , can_string_be_rearranged_as_palindrome(a ) , "\ttime =" , timeit( "z.can_string_be_rearranged_as_palindrome(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__:Tuple = input( """Enter string to determine if it can be rearranged as a palindrome or not: """ ).strip() benchmark(check_str) SCREAMING_SNAKE_CASE__:Dict = can_string_be_rearranged_as_palindrome_counter(check_str) print(F'''{check_str} can {'' if status else 'not '}be rearranged as a palindrome''')
style_context_codestyle: 261
label: 1
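In the AutoencoderKL code above, `blend_v`/`blend_h` cross-fade neighbouring tiles so the tiled encode/decode paths show no seams; the dump's single-letter names obscure this. A de-obfuscated sketch of the horizontal blend, with behaviour unchanged from the record:

import torch

def blend_h(left, right, blend_extent):
    # cross-fade the right edge of `left` into the left edge of `right` (in place)
    blend_extent = min(left.shape[3], right.shape[3], blend_extent)
    for x in range(blend_extent):
        right[:, :, :, x] = (
            left[:, :, :, -blend_extent + x] * (1 - x / blend_extent)
            + right[:, :, :, x] * (x / blend_extent)
        )
    return right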
"""simple docstring""" def _lowerCamelCase( ): __a = [] __a = 1 while len(a ) < 1E6: constant.append(str(a ) ) i += 1 __a = "".join(a ) return ( int(constant[0] ) * int(constant[9] ) * int(constant[9_9] ) * int(constant[9_9_9] ) * int(constant[9_9_9_9] ) * int(constant[9_9_9_9_9] ) * int(constant[9_9_9_9_9_9] ) ) if __name__ == "__main__": print(solution())
code_codestyle: 261
"""simple docstring""" import itertools import random import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin SCREAMING_SNAKE_CASE__:Any = random.Random() if is_torch_available(): import torch def _lowerCamelCase( a , a=1.0 , a=None , a=None ): if rng is None: __a = global_rng __a = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class snake_case__ ( unittest.TestCase ): def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=400 , lowerCamelCase=2000 , lowerCamelCase=1 , lowerCamelCase=0.0 , lowerCamelCase=16000 , lowerCamelCase=True , lowerCamelCase=True , ): __a = parent __a = batch_size __a = min_seq_length __a = max_seq_length __a = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) __a = feature_size __a = padding_value __a = sampling_rate __a = return_attention_mask __a = do_normalize def a__ ( self ): return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def a__ ( self , lowerCamelCase=False , lowerCamelCase=False ): def _flatten(lowerCamelCase ): return list(itertools.chain(*lowerCamelCase ) ) if equal_length: __a = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size __a = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: __a = [np.asarray(lowerCamelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class snake_case__ ( snake_case_, unittest.TestCase ): _snake_case : str = ASTFeatureExtractor def a__ ( self ): __a = ASTFeatureExtractionTester(self ) def a__ ( self ): # Tests that all call wrap to encode_plus and batch_encode_plus __a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 __a = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __a = [np.asarray(lowerCamelCase ) for speech_input in speech_inputs] # Test not batched input __a = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values __a = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) ) # Test batched __a = feat_extract(lowerCamelCase , padding=lowerCamelCase , return_tensors="np" ).input_values __a = feat_extract(lowerCamelCase , padding=lowerCamelCase , return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ): self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. 
__a = [floats_list((1, x) )[0] for x in (800, 800, 800)] __a = np.asarray(lowerCamelCase ) __a = feat_extract(lowerCamelCase , return_tensors="np" ).input_values __a = feat_extract(lowerCamelCase , return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ): self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) ) @require_torch def a__ ( self ): import torch __a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __a = np.random.rand(100 ).astype(np.floataa ) __a = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: __a = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) __a = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def a__ ( self , lowerCamelCase ): from datasets import load_dataset __a = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" ) # automatic decoding with librispeech __a = ds.sort("id" ).select(range(lowerCamelCase ) )[:num_samples]["audio"] return [x["array"] for x in speech_samples] @require_torch def a__ ( self ): # fmt: off __a = torch.tensor( [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776, -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133, -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936, -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] ) # fmt: on __a = self._load_datasamples(1 ) __a = ASTFeatureExtractor() __a = feature_extractor(lowerCamelCase , return_tensors="pt" ).input_values self.assertEquals(input_values.shape , (1, 1024, 128) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCamelCase , atol=1E-4 ) )
style_context_codestyle: 261
label: 1
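The code field above is Project Euler 40: multiply the digits at positions 1, 10, ..., 1 000 000 of the Champernowne constant 0.123456789101112... . An equivalent sketch that builds only as many digits as needed:

import itertools

def champernowne_product(positions=(1, 10, 100, 1_000, 10_000, 100_000, 1_000_000)):
    # stream the decimal digits of 1, 2, 3, ... and keep just enough of them
    digit_stream = (digit for i in itertools.count(1) for digit in str(i))
    digits = "".join(itertools.islice(digit_stream, max(positions)))
    product = 1
    for position in positions:
        product *= int(digits[position - 1])  # positions are 1-indexed
    return product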
"""simple docstring""" from __future__ import annotations from dataclasses import dataclass @dataclass class snake_case__ : _snake_case : float _snake_case : TreeNode | None = None _snake_case : TreeNode | None = None def _lowerCamelCase( a ): # Validation def is_valid_tree(a ) -> bool: if node is None: return True if not isinstance(a , a ): return False try: float(node.data ) except (TypeError, ValueError): return False return is_valid_tree(node.left ) and is_valid_tree(node.right ) if not is_valid_tree(a ): raise ValueError( "Each node should be type of TreeNode and data should be float." ) def is_binary_search_tree_recursive_check( a , a , a ) -> bool: if node is None: return True return ( left_bound < node.data < right_bound and is_binary_search_tree_recursive_check(node.left , a , node.data ) and is_binary_search_tree_recursive_check( node.right , node.data , a ) ) return is_binary_search_tree_recursive_check(a , -float("inf" ) , float("inf" ) ) if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 261
"""simple docstring""" from typing import Optional, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin class snake_case__ ( snake_case_, snake_case_ ): @register_to_config def __init__( self , lowerCamelCase = 768 , ): super().__init__() __a = nn.Parameter(torch.zeros(1 , lowerCamelCase ) ) __a = nn.Parameter(torch.ones(1 , lowerCamelCase ) ) def a__ ( self , lowerCamelCase = None , lowerCamelCase = None , ): __a = nn.Parameter(self.mean.to(lowerCamelCase ).to(lowerCamelCase ) ) __a = nn.Parameter(self.std.to(lowerCamelCase ).to(lowerCamelCase ) ) return self def a__ ( self , lowerCamelCase ): __a = (embeds - self.mean) * 1.0 / self.std return embeds def a__ ( self , lowerCamelCase ): __a = (embeds * self.std) + self.mean return embeds
style_context_codestyle: 261
label: 1
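The validator in the code field above recurses with exclusive lower/upper bounds inherited from each ancestor; spelled out plainly (the type-validation pass from the record is omitted here):

from __future__ import annotations
from dataclasses import dataclass

@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None

def is_binary_search_tree(node, low=float("-inf"), high=float("inf")):
    # an empty subtree is trivially valid
    if node is None:
        return True
    return (
        low < node.data < high
        and is_binary_search_tree(node.left, low, node.data)
        and is_binary_search_tree(node.right, node.data, high)
    )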
"""simple docstring""" class snake_case__ : def __init__( self , lowerCamelCase = "" , lowerCamelCase = False ): # Mapping from the first character of the prefix of the node __a = {} # A node will be a leaf if the tree contains its word __a = is_leaf __a = prefix def a__ ( self , lowerCamelCase ): __a = 0 for q, w in zip(self.prefix , lowerCamelCase ): if q != w: break x += 1 return self.prefix[:x], self.prefix[x:], word[x:] def a__ ( self , lowerCamelCase ): for word in words: self.insert(lowerCamelCase ) def a__ ( self , lowerCamelCase ): # Case 1: If the word is the prefix of the node # Solution: We set the current node as leaf if self.prefix == word: __a = True # Case 2: The node has no edges that have a prefix to the word # Solution: We create an edge from the current node to a new one # containing the word elif word[0] not in self.nodes: __a = RadixNode(prefix=lowerCamelCase , is_leaf=lowerCamelCase ) else: __a = self.nodes[word[0]] __a , __a , __a = incoming_node.match( lowerCamelCase ) # Case 3: The node prefix is equal to the matching # Solution: We insert remaining word on the next node if remaining_prefix == "": self.nodes[matching_string[0]].insert(lowerCamelCase ) # Case 4: The word is greater equal to the matching # Solution: Create a node in between both nodes, change # prefixes and add the new node for the remaining word else: __a = remaining_prefix __a = self.nodes[matching_string[0]] __a = RadixNode(lowerCamelCase , lowerCamelCase ) __a = aux_node if remaining_word == "": __a = True else: self.nodes[matching_string[0]].insert(lowerCamelCase ) def a__ ( self , lowerCamelCase ): __a = self.nodes.get(word[0] , lowerCamelCase ) if not incoming_node: return False else: __a , __a , __a = incoming_node.match( lowerCamelCase ) # If there is remaining prefix, the word can't be on the tree if remaining_prefix != "": return False # This applies when the word and the prefix are equal elif remaining_word == "": return incoming_node.is_leaf # We have word remaining so we check the next node else: return incoming_node.find(lowerCamelCase ) def a__ ( self , lowerCamelCase ): __a = self.nodes.get(word[0] , lowerCamelCase ) if not incoming_node: return False else: __a , __a , __a = incoming_node.match( lowerCamelCase ) # If there is remaining prefix, the word can't be on the tree if remaining_prefix != "": return False # We have word remaining so we check the next node elif remaining_word != "": return incoming_node.delete(lowerCamelCase ) else: # If it is not a leaf, we don't have to delete if not incoming_node.is_leaf: return False else: # We delete the nodes if no edges go from it if len(incoming_node.nodes ) == 0: del self.nodes[word[0]] # We merge the current node with its only child if len(self.nodes ) == 1 and not self.is_leaf: __a = list(self.nodes.values() )[0] __a = merging_node.is_leaf self.prefix += merging_node.prefix __a = merging_node.nodes # If there is more than 1 edge, we just mark it as non-leaf elif len(incoming_node.nodes ) > 1: __a = False # If there is 1 edge, we merge it with its child else: __a = list(incoming_node.nodes.values() )[0] __a = merging_node.is_leaf incoming_node.prefix += merging_node.prefix __a = merging_node.nodes return True def a__ ( self , lowerCamelCase = 0 ): if self.prefix != "": print("-" * height , self.prefix , " (leaf)" if self.is_leaf else "" ) for value in self.nodes.values(): value.print_tree(height + 1 ) def _lowerCamelCase( ): __a = "banana bananas bandana band apple all beast".split() __a = RadixNode() root.insert_many(a ) assert 
all(root.find(a ) for word in words ) assert not root.find("bandanas" ) assert not root.find("apps" ) root.delete("all" ) assert not root.find("all" ) root.delete("banana" ) assert not root.find("banana" ) assert root.find("bananas" ) return True def _lowerCamelCase( ): assert test_trie() def _lowerCamelCase( ): __a = RadixNode() __a = "banana bananas bandanas bandana band apple all beast".split() root.insert_many(a ) print("Words:" , a ) print("Tree:" ) root.print_tree() if __name__ == "__main__": main()
code_codestyle: 261
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available SCREAMING_SNAKE_CASE__:List[str] = { """configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__:Dict = [ """LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""", """LongT5EncoderModel""", """LongT5ForConditionalGeneration""", """LongT5Model""", """LongT5PreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__:Dict = [ """FlaxLongT5ForConditionalGeneration""", """FlaxLongT5Model""", """FlaxLongT5PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longta import ( LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST, LongTaEncoderModel, LongTaForConditionalGeneration, LongTaModel, LongTaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_longta import ( FlaxLongTaForConditionalGeneration, FlaxLongTaModel, FlaxLongTaPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__:Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
style_context_codestyle: 261
label: 1
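The heart of the RadixNode code above is its `match` method, which splits an edge label against an incoming word to decide whether to descend, split the edge, or create a leaf. A standalone sketch with an example:

def split_prefix(prefix, word):
    # longest common prefix, then what is left of each side
    x = 0
    for q, w in zip(prefix, word):
        if q != w:
            break
        x += 1
    return prefix[:x], prefix[x:], word[x:]

# split_prefix("banana", "bandana") == ("ban", "ana", "dana")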
"""simple docstring""" import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO, ) SCREAMING_SNAKE_CASE__:Any = logging.getLogger(__name__) def _lowerCamelCase( a ): __a = git.Repo(search_parent_directories=a ) __a = { "repo_id": str(a ), "repo_sha": str(repo.head.object.hexsha ), "repo_branch": str(repo.active_branch ), } with open(os.path.join(a , "git_log.json" ) , "w" ) as f: json.dump(a , a , indent=4 ) def _lowerCamelCase( a ): if params.n_gpu <= 0: __a = 0 __a = -1 __a = True __a = False return assert torch.cuda.is_available() logger.info("Initializing GPUs" ) if params.n_gpu > 1: assert params.local_rank != -1 __a = int(os.environ["WORLD_SIZE"] ) __a = int(os.environ["N_GPU_NODE"] ) __a = int(os.environ["RANK"] ) # number of nodes / node ID __a = params.world_size // params.n_gpu_per_node __a = params.global_rank // params.n_gpu_per_node __a = True assert params.n_nodes == int(os.environ["N_NODES"] ) assert params.node_id == int(os.environ["NODE_RANK"] ) # local job (single GPU) else: assert params.local_rank == -1 __a = 1 __a = 0 __a = 0 __a = 0 __a = 1 __a = 1 __a = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode __a = params.node_id == 0 and params.local_rank == 0 __a = params.n_nodes > 1 # summary __a = F"--- Global rank: {params.global_rank} - " logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes ) logger.info(PREFIX + "Node ID : %i" % params.node_id ) logger.info(PREFIX + "Local rank : %i" % params.local_rank ) logger.info(PREFIX + "World size : %i" % params.world_size ) logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node ) logger.info(PREFIX + "Master : %s" % str(params.is_master ) ) logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node ) ) logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu ) ) logger.info(PREFIX + "Hostname : %s" % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info("Initializing PyTorch distributed" ) torch.distributed.init_process_group( init_method="env://" , backend="nccl" , ) def _lowerCamelCase( a ): np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
code_codestyle: 261
"""simple docstring""" import argparse import collections import torch from flax import traverse_util from tax import checkpoints from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def _lowerCamelCase( a , a , a , a="attention" ): __a = params[F"{prefix}/layers_{i}/{layer_name}/key/kernel"] __a = params[F"{prefix}/layers_{i}/{layer_name}/out/kernel"] __a = params[F"{prefix}/layers_{i}/{layer_name}/query/kernel"] __a = params[F"{prefix}/layers_{i}/{layer_name}/value/kernel"] return k, o, q, v def _lowerCamelCase( a , a , a , a=False ): if split_mlp_wi: __a = params[F"{prefix}/layers_{i}/mlp/wi_0/kernel"] __a = params[F"{prefix}/layers_{i}/mlp/wi_1/kernel"] __a = (wi_a, wi_a) else: __a = params[F"{prefix}/layers_{i}/mlp/wi/kernel"] __a = params[F"{prefix}/layers_{i}/mlp/wo/kernel"] return wi, wo def _lowerCamelCase( a , a , a , a ): return params[F"{prefix}/layers_{i}/{layer_name}/scale"] def _lowerCamelCase( a , *, a , a ): __a = traverse_util.flatten_dict(variables["target"] ) __a = {"/".join(a ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi __a = "encoder/layers_0/mlp/wi_0/kernel" in old print("Split MLP:" , a ) __a = collections.OrderedDict() # Shared embeddings. __a = old["token_embedder/embedding"] # Encoder. for i in range(a ): # Block i, layer 0 (Self Attention). __a = tax_layer_norm_lookup(a , a , "encoder" , "pre_attention_layer_norm" ) __a , __a , __a , __a = tax_attention_lookup(a , a , "encoder" , "attention" ) __a = layer_norm __a = k.T __a = o.T __a = q.T __a = v.T # Block i, layer 1 (MLP). __a = tax_layer_norm_lookup(a , a , "encoder" , "pre_mlp_layer_norm" ) __a , __a = tax_mlp_lookup(a , a , "encoder" , a ) __a = layer_norm if split_mlp_wi: __a = wi[0].T __a = wi[1].T else: __a = wi.T __a = wo.T __a = old[ "encoder/relpos_bias/rel_embedding" ].T __a = old["encoder/encoder_norm/scale"] if not is_encoder_only: # Decoder. for i in range(a ): # Block i, layer 0 (Self Attention). __a = tax_layer_norm_lookup(a , a , "decoder" , "pre_self_attention_layer_norm" ) __a , __a , __a , __a = tax_attention_lookup(a , a , "decoder" , "self_attention" ) __a = layer_norm __a = k.T __a = o.T __a = q.T __a = v.T # Block i, layer 1 (Cross Attention). __a = tax_layer_norm_lookup(a , a , "decoder" , "pre_cross_attention_layer_norm" ) __a , __a , __a , __a = tax_attention_lookup(a , a , "decoder" , "encoder_decoder_attention" ) __a = layer_norm __a = k.T __a = o.T __a = q.T __a = v.T # Block i, layer 2 (MLP). __a = tax_layer_norm_lookup(a , a , "decoder" , "pre_mlp_layer_norm" ) __a , __a = tax_mlp_lookup(a , a , "decoder" , a ) __a = layer_norm if split_mlp_wi: __a = wi[0].T __a = wi[1].T else: __a = wi.T __a = wo.T __a = old["decoder/decoder_norm/scale"] __a = old[ "decoder/relpos_bias/rel_embedding" ].T # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: __a = old["decoder/logits_dense/kernel"].T return new def _lowerCamelCase( a , a ): __a = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: __a = state_dict["shared.weight"] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: __a = state_dict["shared.weight"] if "lm_head.weight" not in state_dict: # For old 1.0 models. print("Using shared word embeddings as lm_head." 
) __a = state_dict["shared.weight"] return state_dict def _lowerCamelCase( a , a , a , a ): __a = checkpoints.load_tax_checkpoint(a ) __a = convert_tax_to_pytorch(a , num_layers=config.num_layers , is_encoder_only=a ) __a = make_state_dict(a , a ) model.load_state_dict(a , strict=a ) def _lowerCamelCase( a , a , a , a = False ): __a = TaConfig.from_json_file(a ) print(F"Building PyTorch model from configuration: {config}" ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: __a = TaEncoderModel(a ) else: __a = TaForConditionalGeneration(a ) # Load weights from tf checkpoint load_tax_weights_in_ta(a , a , a , a ) # Save pytorch-model print(F"Save PyTorch model to {pytorch_dump_path}" ) model.save_pretrained(a ) # Verify that we can load the checkpoint. model.from_pretrained(a ) print("Done" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__:Tuple = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""") # Required parameters parser.add_argument( """--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False ) SCREAMING_SNAKE_CASE__:Tuple = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only )
261
1
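The conversion script in the row above transposes every T5X kernel (`k.T`, `wi.T`, and so on) before assigning it. A minimal sketch of why, assuming only numpy and torch: Flax/T5X stores dense kernels as (in_features, out_features), while `torch.nn.Linear.weight` uses (out_features, in_features).

import numpy as np
import torch

in_features, out_features = 512, 1024
jax_kernel = np.random.randn(in_features, out_features).astype(np.float32)  # T5X layout: (in, out)

linear = torch.nn.Linear(in_features, out_features, bias=False)
with torch.no_grad():
    linear.weight.copy_(torch.from_numpy(jax_kernel.T))  # torch layout: (out, in), hence the .T

x = torch.randn(2, in_features)
expected = torch.from_numpy(x.numpy() @ jax_kernel)  # what the Flax layer would compute
assert torch.allclose(linear(x), expected, atol=1e-4)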
"""simple docstring""" import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def _lowerCamelCase( a ): __a = {} __a = tokenizer(example["content"] , truncation=a )["input_ids"] __a = len(example["content"] ) / len(output["input_ids"] ) return output SCREAMING_SNAKE_CASE__:Optional[int] = HfArgumentParser(PretokenizationArguments) SCREAMING_SNAKE_CASE__:str = parser.parse_args() if args.num_workers is None: SCREAMING_SNAKE_CASE__:Any = multiprocessing.cpu_count() SCREAMING_SNAKE_CASE__:int = AutoTokenizer.from_pretrained(args.tokenizer_dir) SCREAMING_SNAKE_CASE__:int = time.time() SCREAMING_SNAKE_CASE__:str = load_dataset(args.dataset_name, split="""train""") print(F'''Dataset loaded in {time.time()-t_start:.2f}s''') SCREAMING_SNAKE_CASE__:int = time.time() SCREAMING_SNAKE_CASE__:List[Any] = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ """repo_name""", """path""", """copies""", """size""", """content""", """license""", """hash""", """line_mean""", """line_max""", """alpha_frac""", """autogenerated""", ], ) print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''') SCREAMING_SNAKE_CASE__:Tuple = time.time() ds.push_to_hub(args.tokenized_data_repo) print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
261
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ): _snake_case : str = StableUnCLIPImgaImgPipeline _snake_case : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS _snake_case : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS _snake_case : Optional[Any] = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess _snake_case : List[Any] = frozenset([] ) def a__ ( self ): __a = 32 __a = embedder_hidden_size # image encoding components __a = CLIPImageProcessor(crop_size=32 , size=32 ) torch.manual_seed(0 ) __a = CLIPVisionModelWithProjection( CLIPVisionConfig( hidden_size=lowerCamelCase , projection_dim=lowerCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) ) # regular denoising components torch.manual_seed(0 ) __a = StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase ) __a = DDPMScheduler(beta_schedule="squaredcos_cap_v2" ) torch.manual_seed(0 ) __a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) __a = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) __a = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCamelCase , layers_per_block=1 , upcast_attention=lowerCamelCase , use_linear_projection=lowerCamelCase , ) torch.manual_seed(0 ) __a = DDIMScheduler( beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=lowerCamelCase , steps_offset=1 , ) torch.manual_seed(0 ) __a = AutoencoderKL() __a = { # image encoding components "feature_extractor": feature_extractor, "image_encoder": image_encoder.eval(), # image noising components "image_normalizer": image_normalizer.eval(), "image_noising_scheduler": image_noising_scheduler, # regular denoising components "tokenizer": tokenizer, "text_encoder": text_encoder.eval(), "unet": unet.eval(), "scheduler": scheduler, "vae": vae.eval(), } return 
components def a__ ( self , lowerCamelCase , lowerCamelCase=0 , lowerCamelCase=True ): if str(lowerCamelCase ).startswith("mps" ): __a = torch.manual_seed(lowerCamelCase ) else: __a = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase ) __a = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase ) if pil_image: __a = input_image * 0.5 + 0.5 __a = input_image.clamp(0 , 1 ) __a = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() __a = DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0] return { "prompt": "An anime racoon running a marathon", "image": input_image, "generator": generator, "num_inference_steps": 2, "output_type": "np", } @skip_mps def a__ ( self ): __a = "cpu" # ensure determinism for the device-dependent torch.Generator __a = self.get_dummy_components() __a = StableUnCLIPImgaImgPipeline(**lowerCamelCase ) __a = sd_pipe.to(lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase ) __a = self.get_dummy_inputs(lowerCamelCase ) inputs.update({"image_embeds": None} ) __a = sd_pipe(**lowerCamelCase ).images __a = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __a = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def a__ ( self ): __a = torch_device in ["cpu", "mps"] self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase ) def a__ ( self ): __a = torch_device in ["cpu", "mps"] self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def a__ ( self ): self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase ) @slow @require_torch_gpu class snake_case__ ( unittest.TestCase ): def a__ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self ): __a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) __a = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" ) __a = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __a = torch.Generator(device="cpu" ).manual_seed(0 ) __a = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" ) __a = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase ) def a__ ( self ): __a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) __a = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" ) __a = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) # stable unclip will oom when integration tests are run on a 
V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __a = torch.Generator(device="cpu" ).manual_seed(0 ) __a = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" ) __a = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase ) def a__ ( self ): __a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __a = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa ) __a = pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __a = pipe( lowerCamelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , ) __a = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
261
1
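The row above pairs the CodeParrot pretokenization script with a StableUnCLIP pipeline test. As a quick, self-contained illustration of the `map(..., num_proc=...)` pattern the script relies on, here is a sketch with a toy whitespace "tokenizer" standing in for the real `AutoTokenizer` (the names in this snippet are invented for the example):

from datasets import Dataset

ds = Dataset.from_dict({"content": ["def f():\n    return 1\n", "print('hello world')\n"]})

def char_per_token_ratio(example):
    # stand-in "tokenizer": whitespace split; the real script uses a trained AutoTokenizer
    tokens = example["content"].split()
    return {"ratio_char_token": len(example["content"]) / len(tokens)}

# parallel map, dropping the raw text column once the statistic is computed
ds = ds.map(char_per_token_ratio, num_proc=2, remove_columns=["content"])
print(ds[0])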
"""simple docstring""" import unittest from diffusers import FlaxAutoencoderKL from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax from .test_modeling_common_flax import FlaxModelTesterMixin if is_flax_available(): import jax @require_flax class snake_case__ ( snake_case_, unittest.TestCase ): _snake_case : Tuple = FlaxAutoencoderKL @property def a__ ( self ): __a = 4 __a = 3 __a = (32, 32) __a = jax.random.PRNGKey(0 ) __a = jax.random.uniform(lowerCamelCase , ((batch_size, num_channels) + sizes) ) return {"sample": image, "prng_key": prng_key} def a__ ( self ): __a = { "block_out_channels": [32, 64], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 4, } __a = self.dummy_input return init_dict, inputs_dict
261
"""simple docstring""" import random def _lowerCamelCase( a , a , a ): __a = a[left_index] __a = left_index + 1 for j in range(left_index + 1 , a ): if a[j] < pivot: __a , __a = a[i], a[j] i += 1 __a , __a = a[i - 1], a[left_index] return i - 1 def _lowerCamelCase( a , a , a ): if left < right: __a = random.randint(a , right - 1 ) __a , __a = ( a[left], a[pivot], ) # switches the pivot with the left most bound __a = partition(a , a , a ) quick_sort_random( a , a , a ) # recursive quicksort to the left of the pivot point quick_sort_random( a , pivot_index + 1 , a ) # recursive quicksort to the right of the pivot point def _lowerCamelCase( ): __a = input("Enter numbers separated by a comma:\n" ).strip() __a = [int(a ) for item in user_input.split("," )] quick_sort_random(a , 0 , len(a ) ) print(a ) if __name__ == "__main__": main()
261
1
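A quick sanity check for the random-pivot quicksort in the row above (a sketch, assuming `quick_sort_random` from that file is in scope; the sort works in place over the half-open range [0, len(data))):

import random

data = [random.randint(0, 100) for _ in range(20)]
expected = sorted(data)
quick_sort_random(data, 0, len(data))  # sorts in place
assert data == expected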
"""simple docstring""" import argparse import torch from transformers import ( EncodecConfig, EncodecFeatureExtractor, EncodecModel, logging, ) # checkpoints downloaded from: # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th logging.set_verbosity_info() SCREAMING_SNAKE_CASE__:Dict = logging.get_logger("""transformers.models.encodec""") SCREAMING_SNAKE_CASE__:str = { """quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""", """quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""", """quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""", """quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""", } SCREAMING_SNAKE_CASE__:List[Any] = { """encoder.model.0.conv.conv""": """encoder.layers.0.conv""", """encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""", """encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""", """encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""", """encoder.model.3.conv.conv""": """encoder.layers.3.conv""", """encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""", """encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""", """encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""", """encoder.model.6.conv.conv""": """encoder.layers.6.conv""", """encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""", """encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""", """encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""", """encoder.model.9.conv.conv""": """encoder.layers.9.conv""", """encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""", """encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""", """encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""", """encoder.model.12.conv.conv""": """encoder.layers.12.conv""", """encoder.model.13.lstm""": """encoder.layers.13.lstm""", """encoder.model.15.conv.conv""": """encoder.layers.15.conv""", } SCREAMING_SNAKE_CASE__:Union[str, Any] = { """encoder.model.0.conv.norm""": """encoder.layers.0.norm""", """encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""", """encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""", """encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""", """encoder.model.3.conv.norm""": """encoder.layers.3.norm""", """encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""", """encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""", """encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""", """encoder.model.6.conv.norm""": """encoder.layers.6.norm""", """encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""", """encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""", """encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""", """encoder.model.9.conv.norm""": """encoder.layers.9.norm""", """encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""", """encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""", """encoder.model.10.shortcut.conv.norm""": 
"""encoder.layers.10.shortcut.norm""", """encoder.model.12.conv.norm""": """encoder.layers.12.norm""", """encoder.model.15.conv.norm""": """encoder.layers.15.norm""", } SCREAMING_SNAKE_CASE__:Dict = { """decoder.model.0.conv.conv""": """decoder.layers.0.conv""", """decoder.model.1.lstm""": """decoder.layers.1.lstm""", """decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""", """decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""", """decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""", """decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""", """decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""", """decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""", """decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""", """decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""", """decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""", """decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""", """decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""", """decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""", """decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""", """decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""", """decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""", """decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""", """decoder.model.15.conv.conv""": """decoder.layers.15.conv""", } SCREAMING_SNAKE_CASE__:Dict = { """decoder.model.0.conv.norm""": """decoder.layers.0.norm""", """decoder.model.3.convtr.norm""": """decoder.layers.3.norm""", """decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""", """decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""", """decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""", """decoder.model.6.convtr.norm""": """decoder.layers.6.norm""", """decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""", """decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""", """decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""", """decoder.model.9.convtr.norm""": """decoder.layers.9.norm""", """decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""", """decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""", """decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""", """decoder.model.12.convtr.norm""": """decoder.layers.12.norm""", """decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""", """decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""", """decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""", """decoder.model.15.conv.norm""": """decoder.layers.15.norm""", } SCREAMING_SNAKE_CASE__:str = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_DECODER, } SCREAMING_SNAKE_CASE__:Dict = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_ENCODER_48K, **MAPPING_DECODER, **MAPPING_DECODER_48K, } SCREAMING_SNAKE_CASE__:Dict = [] SCREAMING_SNAKE_CASE__:str = [] def _lowerCamelCase( a , a , a , a , a ): for attribute in key.split("." ): __a = getattr(a , a ) if weight_type is not None: __a = getattr(a , a ).shape else: __a = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be" F" {value.shape} for {full_name}" ) if weight_type == "weight": __a = value elif weight_type == "weight_g": __a = value elif weight_type == "weight_v": __a = value elif weight_type == "bias": __a = value elif weight_type == "running_mean": __a = value elif weight_type == "running_var": __a = value elif weight_type == "num_batches_tracked": __a = value elif weight_type == "weight_ih_l0": __a = value elif weight_type == "weight_hh_l0": __a = value elif weight_type == "bias_ih_l0": __a = value elif weight_type == "bias_hh_l0": __a = value elif weight_type == "weight_ih_l1": __a = value elif weight_type == "weight_hh_l1": __a = value elif weight_type == "bias_ih_l1": __a = value elif weight_type == "bias_hh_l1": __a = value else: __a = value logger.info(F"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}." ) def _lowerCamelCase( a , a ): for key in ignore_keys: if key.endswith(".*" ): if name.startswith(key[:-1] ): return True elif ".*." in key: __a , __a = key.split(".*." ) if prefix in name and suffix in name: return True elif key in name: return True return False def _lowerCamelCase( a , a , a ): __a = [] if model_name == "encodec_24khz" or "encodec_32khz": __a = MAPPING_24K elif model_name == "encodec_48khz": __a = MAPPING_48K else: raise ValueError(F"Unsupported model: {model_name}" ) for name, value in orig_dict.items(): if should_ignore(a , a ): logger.info(F"{name} was ignored" ) continue __a = False for key, mapped_key in MAPPING.items(): if "*" in key: __a , __a = key.split(".*." ) if prefix in name and suffix in name: __a = suffix if key in name: # HACK otherwise .embed gets initialized with .embed_avg too if key.endswith("embed" ) and name.endswith("embed_avg" ): continue __a = True if "*" in mapped_key: __a = name.split(a )[0].split("." 
)[-2] __a = mapped_key.replace("*" , a ) if "weight_g" in name: __a = "weight_g" elif "weight_v" in name: __a = "weight_v" elif "weight_ih_l0" in name: __a = "weight_ih_l0" elif "weight_hh_l0" in name: __a = "weight_hh_l0" elif "bias_ih_l0" in name: __a = "bias_ih_l0" elif "bias_hh_l0" in name: __a = "bias_hh_l0" elif "weight_ih_l1" in name: __a = "weight_ih_l1" elif "weight_hh_l1" in name: __a = "weight_hh_l1" elif "bias_ih_l1" in name: __a = "bias_ih_l1" elif "bias_hh_l1" in name: __a = "bias_hh_l1" elif "bias" in name: __a = "bias" elif "weight" in name: __a = "weight" elif "running_mean" in name: __a = "running_mean" elif "running_var" in name: __a = "running_var" elif "num_batches_tracked" in name: __a = "num_batches_tracked" else: __a = None set_recursively(a , a , a , a , a ) continue if not is_used: unused_weights.append(a ) logger.warning(F"Unused weights: {unused_weights}" ) @torch.no_grad() def _lowerCamelCase( a , a , a , a=None , a=None , ): if config_path is not None: __a = EncodecConfig.from_pretrained(a ) else: __a = EncodecConfig() if model_name == "encodec_24khz": pass # config is already correct elif model_name == "encodec_32khz": __a = [8, 5, 4, 4] __a = [2.2] __a = 6_4 __a = 3_2_0_0_0 __a = 2_0_4_8 __a = False __a = False __a = False elif model_name == "encodec_48khz": __a = [8, 5, 4, 2] __a = [3.0, 6.0, 12.0, 24.0] __a = 4_8_0_0_0 __a = 2 __a = False __a = "time_group_norm" __a = True __a = 1.0 __a = 0.01 else: raise ValueError(F"Unknown model name: {model_name}" ) __a = EncodecModel(a ) __a = EncodecFeatureExtractor( feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , ) feature_extractor.save_pretrained(a ) __a = torch.load(a ) if "best_state" in original_checkpoint: # we might have a training state saved, in which case discard the yaml results and just retain the weights __a = original_checkpoint["best_state"] recursively_load_weights(a , a , a ) model.save_pretrained(a ) if repo_id: print("Pushing to the hub..." ) feature_extractor.push_to_hub(a ) model.push_to_hub(a ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__:Optional[int] = argparse.ArgumentParser() parser.add_argument( """--model""", default="""encodec_24khz""", type=str, help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""", ) parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub.""" ) SCREAMING_SNAKE_CASE__:str = parser.parse_args() convert_checkpoint( args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
261
"""simple docstring""" from operator import delitem, getitem, setitem import pytest from data_structures.hashing.hash_map import HashMap def _lowerCamelCase( a ): return getitem, k def _lowerCamelCase( a , a ): return setitem, k, v def _lowerCamelCase( a ): return delitem, k def _lowerCamelCase( a , a , *a ): try: return fun(a , *a ), None except Exception as e: return None, e SCREAMING_SNAKE_CASE__:List[Any] = ( _set("""key_a""", """val_a"""), _set("""key_b""", """val_b"""), ) SCREAMING_SNAKE_CASE__:List[Any] = [ _set("""key_a""", """val_a"""), _set("""key_a""", """val_b"""), ] SCREAMING_SNAKE_CASE__:List[Any] = [ _set("""key_a""", """val_a"""), _set("""key_b""", """val_b"""), _del("""key_a"""), _del("""key_b"""), _set("""key_a""", """val_a"""), _del("""key_a"""), ] SCREAMING_SNAKE_CASE__:Any = [ _get("""key_a"""), _del("""key_a"""), _set("""key_a""", """val_a"""), _del("""key_a"""), _del("""key_a"""), _get("""key_a"""), ] SCREAMING_SNAKE_CASE__:int = [ *[_set(x, x) for x in range(5)], # guaranteed upsize ] SCREAMING_SNAKE_CASE__:Any = [ *[_set(x, x) for x in range(5)], # guaranteed upsize *[_del(x) for x in range(5)], _set("""key_a""", """val_b"""), ] @pytest.mark.parametrize( "operations" , ( pytest.param(_add_items , id="add items" ), pytest.param(_overwrite_items , id="overwrite items" ), pytest.param(_delete_items , id="delete items" ), pytest.param(_access_absent_items , id="access absent items" ), pytest.param(_add_with_resize_up , id="add with resize up" ), pytest.param(_add_with_resize_down , id="add with resize down" ), ) , ) def _lowerCamelCase( a ): __a = HashMap(initial_block_size=4 ) __a = {} for _, (fun, *args) in enumerate(a ): __a , __a = _run_operation(a , a , *a ) __a , __a = _run_operation(a , a , *a ) assert my_res == py_res assert str(a ) == str(a ) assert set(a ) == set(a ) assert len(a ) == len(a ) assert set(my.items() ) == set(py.items() ) def _lowerCamelCase( ): def is_public(a ) -> bool: return not name.startswith("_" ) __a = {name for name in dir({} ) if is_public(a )} __a = {name for name in dir(HashMap() ) if is_public(a )} assert dict_public_names > hash_public_names
261
1
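The Encodec converter in the row above walks dotted checkpoint keys with `getattr` before assigning tensors, raising on shape mismatches. A minimal sketch of that dispatch; `assign_by_path` is a hypothetical stand-in for `set_recursively` (the real function also branches on `weight_type` and logs each assignment):

import torch

def assign_by_path(model, dotted_key, value):
    obj = model
    for attr in dotted_key.split("."):
        obj = getattr(obj, attr)
    if obj.shape != value.shape:
        raise ValueError(f"shape mismatch for {dotted_key}: {obj.shape} vs {value.shape}")
    obj.data = value

net = torch.nn.Sequential(torch.nn.Linear(4, 4))
assign_by_path(net, "0.weight", torch.zeros(4, 4))
assert net[0].weight.abs().sum() == 0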
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.test_utils import execute_subprocess_async def _lowerCamelCase( a=None ): if subparsers is not None: __a = subparsers.add_parser("test" ) else: __a = argparse.ArgumentParser("Accelerate test command" ) parser.add_argument( "--config_file" , default=a , help=( "The path to use to store the config file. Will default to a file named default_config.yaml in the cache " "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " "with 'huggingface'." ) , ) if subparsers is not None: parser.set_defaults(func=a ) return parser def _lowerCamelCase( a ): __a = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] ) if args.config_file is None: __a = script_name else: __a = F"--config_file={args.config_file} {script_name}" __a = ["accelerate-launch"] + test_args.split() __a = execute_subprocess_async(a , env=os.environ.copy() ) if result.returncode == 0: print("Test is a success! You are ready for your distributed training!" ) def _lowerCamelCase( ): __a = test_command_parser() __a = parser.parse_args() test_command(a ) if __name__ == "__main__": main()
261
"""simple docstring""" import copy import re class snake_case__ : _snake_case : Dict = """hp""" _snake_case : List[str] = {} _snake_case : int = None @classmethod def a__ ( cls , lowerCamelCase , lowerCamelCase ): __a = prefix __a = defaults cls.build_naming_info() @staticmethod def a__ ( lowerCamelCase , lowerCamelCase ): if len(lowerCamelCase ) == 0: return "" __a = None if any(char.isdigit() for char in word ): raise Exception(F"Parameters should not contain numbers: '{word}' contains a number" ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 , len(lowerCamelCase ) + 1 ): __a = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: __a = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(lowerCamelCase ): __a = "" while integer != 0: __a = chr(ord("A" ) + integer % 10 ) + s integer //= 10 return s __a = 0 while True: __a = word + "#" + int_to_alphabetic(lowerCamelCase ) if sword in info["reverse_short_word"]: continue else: __a = sword break __a = short_word __a = word return short_word @staticmethod def a__ ( lowerCamelCase , lowerCamelCase ): __a = param_name.split("_" ) __a = [TrialShortNamer.shortname_for_word(lowerCamelCase , lowerCamelCase ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name __a = ["", "_"] for separator in separators: __a = separator.join(lowerCamelCase ) if shortname not in info["reverse_short_param"]: __a = shortname __a = param_name return shortname return param_name @staticmethod def a__ ( lowerCamelCase , lowerCamelCase ): __a = TrialShortNamer.shortname_for_key(lowerCamelCase , lowerCamelCase ) __a = short_name __a = param_name @classmethod def a__ ( cls ): if cls.NAMING_INFO is not None: return __a = { "short_word": {}, "reverse_short_word": {}, "short_param": {}, "reverse_short_param": {}, } __a = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(lowerCamelCase , lowerCamelCase ) __a = info @classmethod def a__ ( cls , lowerCamelCase ): cls.build_naming_info() assert cls.PREFIX is not None __a = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(F"You should provide a default value for the param name {k} with value {v}" ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue __a = cls.NAMING_INFO["short_param"][k] if isinstance(lowerCamelCase , lowerCamelCase ): __a = 1 if v else 0 __a = "" if isinstance(lowerCamelCase , (int, float) ) else "-" __a = F"{key}{sep}{v}" name.append(lowerCamelCase ) return "_".join(lowerCamelCase ) @classmethod def a__ ( cls , lowerCamelCase ): __a = repr[len(cls.PREFIX ) + 1 :] if repr == "": __a = [] else: __a = repr.split("_" ) __a = {} for value in values: if "-" in value: __a , __a = value.split("-" ) else: __a = re.sub("[0-9.]" , "" , lowerCamelCase ) __a = float(re.sub("[^0-9.]" , "" , lowerCamelCase ) ) __a = cls.NAMING_INFO["reverse_short_param"][p_k] __a = p_v for k in cls.DEFAULTS: if k not in parameters: __a = cls.DEFAULTS[k] return parameters
261
1
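A hypothetical usage sketch for the TrialShortNamer in the row above. The method names `shortname` and `parse_repr` follow the upstream transformers utility this dump appears to mirror (the dump obfuscates them as `a__`); `RunNamer` and its defaults are invented for the example. Note the helper raises on words containing digits, so parameter names like `fp16` would not work:

class RunNamer(TrialShortNamer):
    PREFIX = "run"
    DEFAULTS = {"learning_rate": 1e-3, "batch_size": 32, "mixed_precision": False}

name = RunNamer.shortname({"learning_rate": 5e-4, "batch_size": 32, "mixed_precision": True})
print(name)                       # e.g. "run_lr0.0005_mp1"; exact short keys depend on collision handling
print(RunNamer.parse_repr(name))  # round-trips to the full dict, with defaults filled back in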
"""simple docstring""" import copy import re class snake_case__ : _snake_case : Dict = """hp""" _snake_case : List[str] = {} _snake_case : int = None @classmethod def a__ ( cls , lowerCamelCase , lowerCamelCase ): __a = prefix __a = defaults cls.build_naming_info() @staticmethod def a__ ( lowerCamelCase , lowerCamelCase ): if len(lowerCamelCase ) == 0: return "" __a = None if any(char.isdigit() for char in word ): raise Exception(F"Parameters should not contain numbers: '{word}' contains a number" ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 , len(lowerCamelCase ) + 1 ): __a = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: __a = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(lowerCamelCase ): __a = "" while integer != 0: __a = chr(ord("A" ) + integer % 10 ) + s integer //= 10 return s __a = 0 while True: __a = word + "#" + int_to_alphabetic(lowerCamelCase ) if sword in info["reverse_short_word"]: continue else: __a = sword break __a = short_word __a = word return short_word @staticmethod def a__ ( lowerCamelCase , lowerCamelCase ): __a = param_name.split("_" ) __a = [TrialShortNamer.shortname_for_word(lowerCamelCase , lowerCamelCase ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name __a = ["", "_"] for separator in separators: __a = separator.join(lowerCamelCase ) if shortname not in info["reverse_short_param"]: __a = shortname __a = param_name return shortname return param_name @staticmethod def a__ ( lowerCamelCase , lowerCamelCase ): __a = TrialShortNamer.shortname_for_key(lowerCamelCase , lowerCamelCase ) __a = short_name __a = param_name @classmethod def a__ ( cls ): if cls.NAMING_INFO is not None: return __a = { "short_word": {}, "reverse_short_word": {}, "short_param": {}, "reverse_short_param": {}, } __a = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(lowerCamelCase , lowerCamelCase ) __a = info @classmethod def a__ ( cls , lowerCamelCase ): cls.build_naming_info() assert cls.PREFIX is not None __a = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(F"You should provide a default value for the param name {k} with value {v}" ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue __a = cls.NAMING_INFO["short_param"][k] if isinstance(lowerCamelCase , lowerCamelCase ): __a = 1 if v else 0 __a = "" if isinstance(lowerCamelCase , (int, float) ) else "-" __a = F"{key}{sep}{v}" name.append(lowerCamelCase ) return "_".join(lowerCamelCase ) @classmethod def a__ ( cls , lowerCamelCase ): __a = repr[len(cls.PREFIX ) + 1 :] if repr == "": __a = [] else: __a = repr.split("_" ) __a = {} for value in values: if "-" in value: __a , __a = value.split("-" ) else: __a = re.sub("[0-9.]" , "" , lowerCamelCase ) __a = float(re.sub("[^0-9.]" , "" , lowerCamelCase ) ) __a = cls.NAMING_INFO["reverse_short_param"][p_k] __a = p_v for k in cls.DEFAULTS: if k not in parameters: __a = cls.DEFAULTS[k] return parameters
261
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING SCREAMING_SNAKE_CASE__:int = logging.get_logger(__name__) class snake_case__ ( snake_case_ ): _snake_case : Optional[int] = """upernet""" def __init__( self , lowerCamelCase=None , lowerCamelCase=512 , lowerCamelCase=0.02 , lowerCamelCase=[1, 2, 3, 6] , lowerCamelCase=True , lowerCamelCase=0.4 , lowerCamelCase=384 , lowerCamelCase=256 , lowerCamelCase=1 , lowerCamelCase=False , lowerCamelCase=255 , **lowerCamelCase , ): super().__init__(**lowerCamelCase ) if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." ) __a = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] ) elif isinstance(lowerCamelCase , lowerCamelCase ): __a = backbone_config.get("model_type" ) __a = CONFIG_MAPPING[backbone_model_type] __a = config_class.from_dict(lowerCamelCase ) __a = backbone_config __a = hidden_size __a = initializer_range __a = pool_scales __a = use_auxiliary_head __a = auxiliary_loss_weight __a = auxiliary_in_channels __a = auxiliary_channels __a = auxiliary_num_convs __a = auxiliary_concat_input __a = loss_ignore_index def a__ ( self ): __a = copy.deepcopy(self.__dict__ ) __a = self.backbone_config.to_dict() __a = self.__class__.model_type return output
261
1
"""simple docstring""" def _lowerCamelCase( a , a ): __a = 0 __a = len(a ) - 1 while left <= right: # avoid divided by 0 during interpolation if sorted_collection[left] == sorted_collection[right]: if sorted_collection[left] == item: return left else: return None __a = left + ((item - sorted_collection[left]) * (right - left)) // ( sorted_collection[right] - sorted_collection[left] ) # out of range check if point < 0 or point >= len(a ): return None __a = sorted_collection[point] if current_item == item: return point else: if point < left: __a = left __a = point elif point > right: __a = right __a = point else: if item < current_item: __a = point - 1 else: __a = point + 1 return None def _lowerCamelCase( a , a , a , a ): # avoid divided by 0 during interpolation if sorted_collection[left] == sorted_collection[right]: if sorted_collection[left] == item: return left else: return None __a = left + ((item - sorted_collection[left]) * (right - left)) // ( sorted_collection[right] - sorted_collection[left] ) # out of range check if point < 0 or point >= len(a ): return None if sorted_collection[point] == item: return point elif point < left: return interpolation_search_by_recursion(a , a , a , a ) elif point > right: return interpolation_search_by_recursion(a , a , a , a ) else: if sorted_collection[point] > item: return interpolation_search_by_recursion( a , a , a , point - 1 ) else: return interpolation_search_by_recursion( a , a , point + 1 , a ) def _lowerCamelCase( a ): if collection != sorted(a ): raise ValueError("Collection must be ascending sorted" ) return True if __name__ == "__main__": import sys SCREAMING_SNAKE_CASE__:List[Any] = 0 if debug == 1: SCREAMING_SNAKE_CASE__:List[Any] = [10, 30, 40, 45, 50, 66, 77, 93] try: __assert_sorted(collection) except ValueError: sys.exit("""Sequence must be ascending sorted to apply interpolation search""") SCREAMING_SNAKE_CASE__:List[str] = 67 SCREAMING_SNAKE_CASE__:Optional[Any] = interpolation_search(collection, target) if result is not None: print(F'''{target} found at positions: {result}''') else: print("""Not found""")
261
"""simple docstring""" def _lowerCamelCase( a = 1_0_0_0 ): __a = 3 __a = 0 while a < n: if a % 3 == 0 or a % 5 == 0: result += a elif a % 1_5 == 0: result -= a a += 1 return result if __name__ == "__main__": print(F'''{solution() = }''')
261
1
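One worked probe step for the interpolation search above, using the same sample data as its main block:

sorted_collection = [10, 30, 40, 45, 50, 66, 77, 93]
item, left, right = 67, 0, len(sorted_collection) - 1
point = left + ((item - sorted_collection[left]) * (right - left)) // (
    sorted_collection[right] - sorted_collection[left]
)
print(point)  # (67 - 10) * 7 // (93 - 10) = 399 // 83 = 4, so the first probe lands on index 4 (value 50)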
"""simple docstring""" import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all feature extractors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) SCREAMING_SNAKE_CASE__:Dict = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__:str = OrderedDict( [ ("""audio-spectrogram-transformer""", """ASTFeatureExtractor"""), ("""beit""", """BeitFeatureExtractor"""), ("""chinese_clip""", """ChineseCLIPFeatureExtractor"""), ("""clap""", """ClapFeatureExtractor"""), ("""clip""", """CLIPFeatureExtractor"""), ("""clipseg""", """ViTFeatureExtractor"""), ("""conditional_detr""", """ConditionalDetrFeatureExtractor"""), ("""convnext""", """ConvNextFeatureExtractor"""), ("""cvt""", """ConvNextFeatureExtractor"""), ("""data2vec-audio""", """Wav2Vec2FeatureExtractor"""), ("""data2vec-vision""", """BeitFeatureExtractor"""), ("""deformable_detr""", """DeformableDetrFeatureExtractor"""), ("""deit""", """DeiTFeatureExtractor"""), ("""detr""", """DetrFeatureExtractor"""), ("""dinat""", """ViTFeatureExtractor"""), ("""donut-swin""", """DonutFeatureExtractor"""), ("""dpt""", """DPTFeatureExtractor"""), ("""encodec""", """EncodecFeatureExtractor"""), ("""flava""", """FlavaFeatureExtractor"""), ("""glpn""", """GLPNFeatureExtractor"""), ("""groupvit""", """CLIPFeatureExtractor"""), ("""hubert""", """Wav2Vec2FeatureExtractor"""), ("""imagegpt""", """ImageGPTFeatureExtractor"""), ("""layoutlmv2""", """LayoutLMv2FeatureExtractor"""), ("""layoutlmv3""", """LayoutLMv3FeatureExtractor"""), ("""levit""", """LevitFeatureExtractor"""), ("""maskformer""", """MaskFormerFeatureExtractor"""), ("""mctct""", """MCTCTFeatureExtractor"""), ("""mobilenet_v1""", """MobileNetV1FeatureExtractor"""), ("""mobilenet_v2""", """MobileNetV2FeatureExtractor"""), ("""mobilevit""", """MobileViTFeatureExtractor"""), ("""nat""", """ViTFeatureExtractor"""), ("""owlvit""", """OwlViTFeatureExtractor"""), ("""perceiver""", """PerceiverFeatureExtractor"""), ("""poolformer""", """PoolFormerFeatureExtractor"""), ("""regnet""", """ConvNextFeatureExtractor"""), ("""resnet""", """ConvNextFeatureExtractor"""), ("""segformer""", """SegformerFeatureExtractor"""), ("""sew""", """Wav2Vec2FeatureExtractor"""), ("""sew-d""", """Wav2Vec2FeatureExtractor"""), ("""speech_to_text""", """Speech2TextFeatureExtractor"""), ("""speecht5""", """SpeechT5FeatureExtractor"""), ("""swiftformer""", """ViTFeatureExtractor"""), ("""swin""", """ViTFeatureExtractor"""), ("""swinv2""", """ViTFeatureExtractor"""), ("""table-transformer""", """DetrFeatureExtractor"""), ("""timesformer""", """VideoMAEFeatureExtractor"""), ("""tvlt""", """TvltFeatureExtractor"""), ("""unispeech""", """Wav2Vec2FeatureExtractor"""), ("""unispeech-sat""", """Wav2Vec2FeatureExtractor"""), ("""van""", """ConvNextFeatureExtractor"""), ("""videomae""", """VideoMAEFeatureExtractor"""), ("""vilt""", """ViltFeatureExtractor"""), ("""vit""", """ViTFeatureExtractor"""), ("""vit_mae""", """ViTFeatureExtractor"""), ("""vit_msn""", """ViTFeatureExtractor"""), ("""wav2vec2""", """Wav2Vec2FeatureExtractor"""), 
("""wav2vec2-conformer""", """Wav2Vec2FeatureExtractor"""), ("""wavlm""", """Wav2Vec2FeatureExtractor"""), ("""whisper""", """WhisperFeatureExtractor"""), ("""xclip""", """CLIPFeatureExtractor"""), ("""yolos""", """YolosFeatureExtractor"""), ] ) SCREAMING_SNAKE_CASE__:List[str] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) def _lowerCamelCase( a ): for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items(): if class_name in extractors: __a = model_type_to_module_name(a ) __a = importlib.import_module(F".{module_name}" , "transformers.models" ) try: return getattr(a , a ) except AttributeError: continue for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): if getattr(a , "__name__" , a ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. __a = importlib.import_module("transformers" ) if hasattr(a , a ): return getattr(a , a ) return None def _lowerCamelCase( a , a = None , a = False , a = False , a = None , a = None , a = None , a = False , **a , ): __a = get_file_from_repo( a , a , cache_dir=a , force_download=a , resume_download=a , proxies=a , use_auth_token=a , revision=a , local_files_only=a , ) if resolved_config_file is None: logger.info( "Could not locate the feature extractor configuration file, will try to use the model config instead." ) return {} with open(a , encoding="utf-8" ) as reader: return json.load(a ) class snake_case__ : def __init__( self ): raise EnvironmentError( "AutoFeatureExtractor is designed to be instantiated " "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method." ) @classmethod @replace_list_option_in_docstrings(lowerCamelCase ) def a__ ( cls , lowerCamelCase , **lowerCamelCase ): __a = kwargs.pop("config" , lowerCamelCase ) __a = kwargs.pop("trust_remote_code" , lowerCamelCase ) __a = True __a , __a = FeatureExtractionMixin.get_feature_extractor_dict(lowerCamelCase , **lowerCamelCase ) __a = config_dict.get("feature_extractor_type" , lowerCamelCase ) __a = None if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ): __a = config_dict["auto_map"]["AutoFeatureExtractor"] # If we don't find the feature extractor class in the feature extractor config, let's try the model config. 
if feature_extractor_class is None and feature_extractor_auto_map is None: if not isinstance(lowerCamelCase , lowerCamelCase ): __a = AutoConfig.from_pretrained(lowerCamelCase , **lowerCamelCase ) # It could be in `config.feature_extractor_type`` __a = getattr(lowerCamelCase , "feature_extractor_type" , lowerCamelCase ) if hasattr(lowerCamelCase , "auto_map" ) and "AutoFeatureExtractor" in config.auto_map: __a = config.auto_map["AutoFeatureExtractor"] if feature_extractor_class is not None: __a = feature_extractor_class_from_name(lowerCamelCase ) __a = feature_extractor_auto_map is not None __a = feature_extractor_class is not None or type(lowerCamelCase ) in FEATURE_EXTRACTOR_MAPPING __a = resolve_trust_remote_code( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) if has_remote_code and trust_remote_code: __a = get_class_from_dynamic_module( lowerCamelCase , lowerCamelCase , **lowerCamelCase ) __a = kwargs.pop("code_revision" , lowerCamelCase ) if os.path.isdir(lowerCamelCase ): feature_extractor_class.register_for_auto_class() return feature_extractor_class.from_dict(lowerCamelCase , **lowerCamelCase ) elif feature_extractor_class is not None: return feature_extractor_class.from_dict(lowerCamelCase , **lowerCamelCase ) # Last try: we use the FEATURE_EXTRACTOR_MAPPING. elif type(lowerCamelCase ) in FEATURE_EXTRACTOR_MAPPING: __a = FEATURE_EXTRACTOR_MAPPING[type(lowerCamelCase )] return feature_extractor_class.from_dict(lowerCamelCase , **lowerCamelCase ) raise ValueError( F"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a " F"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following " F"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}" ) @staticmethod def a__ ( lowerCamelCase , lowerCamelCase ): FEATURE_EXTRACTOR_MAPPING.register(lowerCamelCase , lowerCamelCase )
261
"""simple docstring""" import operator def _lowerCamelCase( a , a = False , a = None ): __a = operator.lt if reverse else operator.gt __a = solution or [] if not arr: return solution __a = [arr.pop(0 )] for i, item in enumerate(a ): if _operator(a , sublist[-1] ): sublist.append(a ) arr.pop(a ) # merging sublist into solution list if not solution: solution.extend(a ) else: while sublist: __a = sublist.pop(0 ) for i, xx in enumerate(a ): if not _operator(a , a ): solution.insert(a , a ) break else: solution.append(a ) strand_sort(a , a , a ) return solution if __name__ == "__main__": assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5] assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
261
1
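AutoFeatureExtractor in the row above resolves class names with importlib against `transformers.models`. The same pattern, reduced to a self-contained stdlib sketch (the helper name is invented for the example):

import importlib

def class_from_name(module_path, class_name):
    # resolve a class object from a dotted module path and a class name
    module = importlib.import_module(module_path)
    return getattr(module, class_name, None)

OrderedDict = class_from_name("collections", "OrderedDict")
assert OrderedDict is not None
# the auto machinery uses the relative form: importlib.import_module(f".{module_name}", "transformers.models")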
"""simple docstring""" from operator import delitem, getitem, setitem import pytest from data_structures.hashing.hash_map import HashMap def _lowerCamelCase( a ): return getitem, k def _lowerCamelCase( a , a ): return setitem, k, v def _lowerCamelCase( a ): return delitem, k def _lowerCamelCase( a , a , *a ): try: return fun(a , *a ), None except Exception as e: return None, e SCREAMING_SNAKE_CASE__:List[Any] = ( _set("""key_a""", """val_a"""), _set("""key_b""", """val_b"""), ) SCREAMING_SNAKE_CASE__:List[Any] = [ _set("""key_a""", """val_a"""), _set("""key_a""", """val_b"""), ] SCREAMING_SNAKE_CASE__:List[Any] = [ _set("""key_a""", """val_a"""), _set("""key_b""", """val_b"""), _del("""key_a"""), _del("""key_b"""), _set("""key_a""", """val_a"""), _del("""key_a"""), ] SCREAMING_SNAKE_CASE__:Any = [ _get("""key_a"""), _del("""key_a"""), _set("""key_a""", """val_a"""), _del("""key_a"""), _del("""key_a"""), _get("""key_a"""), ] SCREAMING_SNAKE_CASE__:int = [ *[_set(x, x) for x in range(5)], # guaranteed upsize ] SCREAMING_SNAKE_CASE__:Any = [ *[_set(x, x) for x in range(5)], # guaranteed upsize *[_del(x) for x in range(5)], _set("""key_a""", """val_b"""), ] @pytest.mark.parametrize( "operations" , ( pytest.param(_add_items , id="add items" ), pytest.param(_overwrite_items , id="overwrite items" ), pytest.param(_delete_items , id="delete items" ), pytest.param(_access_absent_items , id="access absent items" ), pytest.param(_add_with_resize_up , id="add with resize up" ), pytest.param(_add_with_resize_down , id="add with resize down" ), ) , ) def _lowerCamelCase( a ): __a = HashMap(initial_block_size=4 ) __a = {} for _, (fun, *args) in enumerate(a ): __a , __a = _run_operation(a , a , *a ) __a , __a = _run_operation(a , a , *a ) assert my_res == py_res assert str(a ) == str(a ) assert set(a ) == set(a ) assert len(a ) == len(a ) assert set(my.items() ) == set(py.items() ) def _lowerCamelCase( ): def is_public(a ) -> bool: return not name.startswith("_" ) __a = {name for name in dir({} ) if is_public(a )} __a = {name for name in dir(HashMap() ) if is_public(a )} assert dict_public_names > hash_public_names
261
"""simple docstring""" import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class snake_case__ : def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=50 , lowerCamelCase=0.02 , lowerCamelCase=True , lowerCamelCase=None , ): __a = parent __a = batch_size __a = seq_length __a = is_training __a = use_input_mask __a = vocab_size __a = hidden_size __a = num_hidden_layers __a = num_attention_heads __a = intermediate_size __a = hidden_act __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = max_position_embeddings __a = initializer_range __a = use_labels __a = scope def a__ ( self ): __a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a = None if self.use_input_mask: __a = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: __a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a = self.get_config() return config, input_ids, input_mask, token_labels def a__ ( self ): return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , ) def a__ ( self ): ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) = self.prepare_config_and_inputs() __a = True __a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ): __a = BertGenerationEncoder(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() __a = model(lowerCamelCase , attention_mask=lowerCamelCase ) __a = model(lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ): __a = True __a = BertGenerationEncoder(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() __a = model( lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , ) __a = model( lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a__ ( self , 
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ): __a = True __a = True __a = BertGenerationDecoder(config=lowerCamelCase ).to(lowerCamelCase ).eval() # first forward pass __a = model( lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , use_cache=lowerCamelCase , ) __a = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __a = ids_tensor((self.batch_size, 3) , config.vocab_size ) __a = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __a = torch.cat([input_ids, next_tokens] , dim=-1 ) __a = torch.cat([input_mask, next_mask] , dim=-1 ) __a = model( lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , output_hidden_states=lowerCamelCase , )["hidden_states"][0] __a = model( lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , output_hidden_states=lowerCamelCase , )["hidden_states"][0] # select random slice __a = ids_tensor((1,) , output_from_past.shape[-1] ).item() __a = output_from_no_past[:, -3:, random_slice_idx].detach() __a = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) ) def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , *lowerCamelCase , ): __a = BertGenerationDecoder(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() __a = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a__ ( self ): __a , __a , __a , __a = self.prepare_config_and_inputs() __a = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ): _snake_case : Union[str, Any] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () _snake_case : Any = (BertGenerationDecoder,) if is_torch_available() else () _snake_case : Union[str, Any] = ( {"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder} if is_torch_available() else {} ) def a__ ( self ): __a = BertGenerationEncoderTester(self ) __a = ConfigTester(self , config_class=lowerCamelCase , hidden_size=37 ) def a__ ( self ): self.config_tester.run_common_tests() def a__ ( self ): __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase ) def a__ ( self ): __a , __a , __a , __a = self.model_tester.prepare_config_and_inputs() __a = "bert" self.model_tester.create_and_check_model(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) def a__ ( self ): __a = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase ) def a__ ( self ): __a = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCamelCase ) def a__ ( self ): # This regression test was failing with PyTorch < 1.3 ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( 
__a ) , ( __a ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() __a = None self.model_tester.create_and_check_model_as_decoder( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) def a__ ( self ): __a = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase ) @slow def a__ ( self ): __a = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) self.assertIsNotNone(lowerCamelCase ) @require_torch class snake_case__ ( unittest.TestCase ): @slow def a__ ( self ): __a = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) __a = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] ) with torch.no_grad(): __a = model(lowerCamelCase )[0] __a = torch.Size([1, 8, 1024] ) self.assertEqual(output.shape , lowerCamelCase ) __a = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) ) @require_torch class snake_case__ ( unittest.TestCase ): @slow def a__ ( self ): __a = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) __a = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] ) with torch.no_grad(): __a = model(lowerCamelCase )[0] __a = torch.Size([1, 8, 50358] ) self.assertEqual(output.shape , lowerCamelCase ) __a = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) )
261
1
"""simple docstring""" import math class snake_case__ : def __init__( self , lowerCamelCase=0 ): # a graph with Node 0,1,...,N-1 __a = n __a = [ [math.inf for j in range(0 , lowerCamelCase )] for i in range(0 , lowerCamelCase ) ] # adjacency matrix for weight __a = [ [math.inf for j in range(0 , lowerCamelCase )] for i in range(0 , lowerCamelCase ) ] # dp[i][j] stores minimum distance from i to j def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): __a = w def a__ ( self ): for k in range(0 , self.n ): for i in range(0 , self.n ): for j in range(0 , self.n ): __a = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] ) def a__ ( self , lowerCamelCase , lowerCamelCase ): return self.dp[u][v] if __name__ == "__main__": SCREAMING_SNAKE_CASE__:Dict = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 10) graph.add_edge(1, 3, 5) graph.add_edge(2, 3, 7) graph.add_edge(3, 0, 10) graph.add_edge(3, 1, 2) graph.add_edge(3, 2, 1) graph.add_edge(3, 4, 6) graph.add_edge(4, 1, 3) graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() graph.show_min(1, 4) graph.show_min(0, 3)
261
"""simple docstring""" # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401 from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401 deprecate( """stable diffusion controlnet""", """0.22.0""", """Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""", standard_warn=False, stacklevel=3, )
261
1
"""simple docstring""" from collections import namedtuple SCREAMING_SNAKE_CASE__:List[Any] = namedtuple("""from_to""", """from_ to""") SCREAMING_SNAKE_CASE__:Union[str, Any] = { """cubicmeter""": from_to(1, 1), """litre""": from_to(0.001, 1000), """kilolitre""": from_to(1, 1), """gallon""": from_to(0.00_454, 264.172), """cubicyard""": from_to(0.76_455, 1.30_795), """cubicfoot""": from_to(0.028, 35.3_147), """cup""": from_to(0.000_236_588, 4_226.75), } def _lowerCamelCase( a , a , a ): if from_type not in METRIC_CONVERSION: raise ValueError( F"Invalid 'from_type' value: {from_type!r} Supported values are:\n" + ", ".join(a ) ) if to_type not in METRIC_CONVERSION: raise ValueError( F"Invalid 'to_type' value: {to_type!r}. Supported values are:\n" + ", ".join(a ) ) return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to if __name__ == "__main__": import doctest doctest.testmod()
261
"""simple docstring""" import argparse import fairseq import torch from torch import nn from transformers import ( MBartaaTokenizer, MBartConfig, MBartForCausalLM, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() SCREAMING_SNAKE_CASE__:Any = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__:Any = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } SCREAMING_SNAKE_CASE__:Optional[int] = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def _lowerCamelCase( a , a , a , a , a ): for attribute in key.split("." ): __a = getattr(a , a ) if weight_type is not None: __a = getattr(a , a ).shape else: __a = hf_pointer.shape assert hf_shape == value.shape, ( F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" F" {value.shape} for {full_name}" ) if weight_type == "weight": __a = value elif weight_type == "weight_g": __a = value elif weight_type == "weight_v": __a = value elif weight_type == "bias": __a = value else: __a = value logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def _lowerCamelCase( a , a ): __a = [] __a = fairseq_model.state_dict() __a = hf_model.feature_extractor __a = hf_model.adapter for name, value in fairseq_dict.items(): __a = False if "conv_layers" in name: load_conv_layer( a , a , a , a , hf_model.config.feat_extract_norm == "group" , ) __a = True elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."] ): load_adapter(a , a , a , a ) __a = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: __a = True if "*" in mapped_key: __a = name.split(a )[0].split("." )[-2] __a = mapped_key.replace("*" , a ) if "weight_g" in name: __a = "weight_g" elif "weight_v" in name: __a = "weight_v" elif "bias" in name: __a = "bias" elif "weight" in name: __a = "weight" else: __a = None set_recursively(a , a , a , a , a ) continue if not is_used: unused_weights.append(a ) logger.warning(F"Unused weights: {unused_weights}" ) def _lowerCamelCase( a , a , a , a , a ): __a = full_name.split("conv_layers." )[-1] __a = name.split("." 
) __a = int(items[0] ) __a = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) __a = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) __a = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) __a = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." ) __a = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) else: unused_weights.append(a ) def _lowerCamelCase( a , a , a , a ): __a = full_name.split("adaptor." )[-1] __a = name.split("." ) if items[1].isdigit(): __a = int(items[1] ) else: __a = None if "adaptor" not in full_name: if "proj_ln" in full_name: # has to be layer norm if "bias" in name: assert ( value.shape == adapter.proj_layer_norm.bias.data.shape ), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found." __a = value logger.info(F"Adapter proj layer norm bias was initialized from {full_name}." ) if "weight" in name: assert ( value.shape == adapter.proj_layer_norm.weight.data.shape ), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found." __a = value else: # has to be projection layer if "bias" in name: assert ( value.shape == adapter.proj.bias.data.shape ), F"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found." __a = value logger.info(F"Adapter proj layer bias was initialized from {full_name}." ) if "weight" in name: assert ( value.shape == adapter.proj.weight.data.shape ), F"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found." __a = value logger.info(F"Adapter proj layer weight was initialized from {full_name}." ) elif isinstance(a , a ): if "bias" in name: assert ( value.shape == adapter.layers[layer_id].conv.bias.data.shape ), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found." __a = value logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." ) elif "weight" in name: assert ( value.shape == adapter.layers[layer_id].conv.weight.data.shape ), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found." __a = value logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." 
) else: unused_weights.append(a ) def _lowerCamelCase( a ): __a , __a = emb.weight.shape __a = nn.Linear(a , a , bias=a ) __a = emb.weight.data return lin_layer @torch.no_grad() def _lowerCamelCase( a , a , a , a , a , a , a , a , a , a , a , ): __a = WavaVecaConfig.from_pretrained( a , add_adapter=a , adapter_stride=a , adapter_kernel_size=a , use_auth_token=a , output_hidden_size=a , ) __a = MBartConfig.from_pretrained(a ) # load model __a , __a , __a = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={ "config_yaml": config_yaml_path, "data": "/".join(dict_path.split("/" )[:-1] ), "w2v_path": checkpoint_path, "load_pretrained_decoder_from": None, } , ) __a = model[0].eval() # load feature extractor __a = WavaVecaFeatureExtractor.from_pretrained(a , use_auth_token=a ) # set weights for wav2vec2 encoder __a = WavaVecaModel(a ) recursively_load_weights_wavaveca(model.encoder , a ) # load decoder weights __a = MBartForCausalLM(a ) __a , __a = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=a ) logger.warning(F"The following keys are missing when loading the decoder weights: {missing_keys}" ) logger.warning(F"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" ) __a = SpeechEncoderDecoderModel(encoder=a , decoder=a ) __a = False __a = MBartaaTokenizer(a ) tokenizer.save_pretrained(a ) __a = hf_wavavec.config.to_dict() __a = tokenizer.pad_token_id __a = tokenizer.bos_token_id __a = tokenizer.eos_token_id __a = "mbart50" __a = "wav2vec2" __a = tokenizer.eos_token_id __a = 2_5_0_0_0_4 __a = tokenizer.eos_token_id __a = SpeechEncoderDecoderConfig.from_dict(a ) hf_wavavec.save_pretrained(a ) feature_extractor.save_pretrained(a ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__:int = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""") parser.add_argument( """--encoder_config_path""", default="""facebook/wav2vec2-xls-r-1b""", type=str, help="""Path to hf encoder wav2vec2 checkpoint config""", ) parser.add_argument( """--decoder_config_path""", default="""facebook/mbart-large-50-one-to-many-mmt""", type=str, help="""Path to hf decoder checkpoint config""", ) parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""") parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""") parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""") parser.add_argument("""--encoder_output_dim""", default=1024, type=int, help="""encoder output dim""") parser.add_argument("""--start_token_id""", default=250004, type=int, help="""`decoder_start_token_id` of model config""") SCREAMING_SNAKE_CASE__:List[Any] = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, args.config_yaml_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, add_adapter=args.add_adapter, adapter_kernel_size=args.adapter_kernel_size, 
adapter_stride=args.adapter_stride, decoder_start_token_id=args.start_token_id, encoder_output_dim=args.encoder_output_dim, )
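# Illustrative invocation of the converter above. The script filename and all
# paths below are placeholders, not the script's real location; substitute your
# own fairseq checkpoint, mBART dictionary, and fairseq config:
#
#   python convert_mbart_wav2vec2.py \
#       --checkpoint_path ./checkpoint_best.pt \
#       --dict_path ./dict.mbart50.txt \
#       --config_yaml_path ./config.yaml \
#       --pytorch_dump_folder_path ./wav2vec2-mbart50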
261
1
"""simple docstring""" def _lowerCamelCase( a ): __a = n ** (1 / 3) return (val * val * val) == n if __name__ == "__main__": print(perfect_cube(27)) print(perfect_cube(4))
261
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) SCREAMING_SNAKE_CASE__:str = { """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""], """processing_trocr""": ["""TrOCRProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__:Tuple = [ """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""", """TrOCRForCausalLM""", """TrOCRPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys SCREAMING_SNAKE_CASE__:List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
261
1
"""simple docstring""" from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING SCREAMING_SNAKE_CASE__:Optional[Any] = logging.get_logger(__name__) @add_end_docstrings(snake_case_ ) class snake_case__ ( snake_case_ ): def __init__( self , *lowerCamelCase , **lowerCamelCase ): super().__init__(*lowerCamelCase , **lowerCamelCase ) requires_backends(self , "vision" ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING ) def a__ ( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None ): __a = {} __a = {} if prompt is not None: __a = prompt if generate_kwargs is not None: __a = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: __a = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter," " please use only one" ) __a = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self , lowerCamelCase , **lowerCamelCase ): return super().__call__(lowerCamelCase , **lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase=None ): __a = load_image(lowerCamelCase ) if prompt is not None: if not isinstance(lowerCamelCase , lowerCamelCase ): raise ValueError( F"Received an invalid text input, got - {type(lowerCamelCase )} - but expected a single string. " "Note also that one single text can be provided for conditional image to text generation." ) __a = self.model.config.model_type if model_type == "git": __a = self.image_processor(images=lowerCamelCase , return_tensors=self.framework ) __a = self.tokenizer(text=lowerCamelCase , add_special_tokens=lowerCamelCase ).input_ids __a = [self.tokenizer.cls_token_id] + input_ids __a = torch.tensor(lowerCamelCase ).unsqueeze(0 ) model_inputs.update({"input_ids": input_ids} ) elif model_type == "pix2struct": __a = self.image_processor(images=lowerCamelCase , header_text=lowerCamelCase , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation __a = self.image_processor(images=lowerCamelCase , return_tensors=self.framework ) __a = self.tokenizer(lowerCamelCase , return_tensors=self.framework ) model_inputs.update(lowerCamelCase ) else: raise ValueError(F"Model type {model_type} does not support conditional text generation" ) else: __a = self.image_processor(images=lowerCamelCase , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: __a = None return model_inputs def a__ ( self , lowerCamelCase , lowerCamelCase=None ): # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first. 
if ( "input_ids" in model_inputs and isinstance(model_inputs["input_ids"] , lowerCamelCase ) and all(x is None for x in model_inputs["input_ids"] ) ): __a = None if generate_kwargs is None: __a = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. __a = model_inputs.pop(self.model.main_input_name ) __a = self.model.generate(lowerCamelCase , **lowerCamelCase , **lowerCamelCase ) return model_outputs def a__ ( self , lowerCamelCase ): __a = [] for output_ids in model_outputs: __a = { "generated_text": self.tokenizer.decode( lowerCamelCase , skip_special_tokens=lowerCamelCase , ) } records.append(lowerCamelCase ) return records
261
"""simple docstring""" import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO, ) SCREAMING_SNAKE_CASE__:Any = logging.getLogger(__name__) def _lowerCamelCase( a ): __a = git.Repo(search_parent_directories=a ) __a = { "repo_id": str(a ), "repo_sha": str(repo.head.object.hexsha ), "repo_branch": str(repo.active_branch ), } with open(os.path.join(a , "git_log.json" ) , "w" ) as f: json.dump(a , a , indent=4 ) def _lowerCamelCase( a ): if params.n_gpu <= 0: __a = 0 __a = -1 __a = True __a = False return assert torch.cuda.is_available() logger.info("Initializing GPUs" ) if params.n_gpu > 1: assert params.local_rank != -1 __a = int(os.environ["WORLD_SIZE"] ) __a = int(os.environ["N_GPU_NODE"] ) __a = int(os.environ["RANK"] ) # number of nodes / node ID __a = params.world_size // params.n_gpu_per_node __a = params.global_rank // params.n_gpu_per_node __a = True assert params.n_nodes == int(os.environ["N_NODES"] ) assert params.node_id == int(os.environ["NODE_RANK"] ) # local job (single GPU) else: assert params.local_rank == -1 __a = 1 __a = 0 __a = 0 __a = 0 __a = 1 __a = 1 __a = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode __a = params.node_id == 0 and params.local_rank == 0 __a = params.n_nodes > 1 # summary __a = F"--- Global rank: {params.global_rank} - " logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes ) logger.info(PREFIX + "Node ID : %i" % params.node_id ) logger.info(PREFIX + "Local rank : %i" % params.local_rank ) logger.info(PREFIX + "World size : %i" % params.world_size ) logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node ) logger.info(PREFIX + "Master : %s" % str(params.is_master ) ) logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node ) ) logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu ) ) logger.info(PREFIX + "Hostname : %s" % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info("Initializing PyTorch distributed" ) torch.distributed.init_process_group( init_method="env://" , backend="nccl" , ) def _lowerCamelCase( a ): np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
261
1
"""simple docstring""" import pickle import numpy as np from matplotlib import pyplot as plt class snake_case__ : def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=0.2 , lowerCamelCase=0.2 ): __a = bp_numa __a = bp_numa __a = bp_numa __a = conva_get[:2] __a = conva_get[2] __a = size_pa __a = rate_w __a = rate_t __a = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] __a = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) __a = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) __a = -2 * np.random.rand(self.conva[1] ) + 1 __a = -2 * np.random.rand(self.num_bpa ) + 1 __a = -2 * np.random.rand(self.num_bpa ) + 1 def a__ ( self , lowerCamelCase ): # save model dict with pickle __a = { "num_bp1": self.num_bpa, "num_bp2": self.num_bpa, "num_bp3": self.num_bpa, "conv1": self.conva, "step_conv1": self.step_conva, "size_pooling1": self.size_poolinga, "rate_weight": self.rate_weight, "rate_thre": self.rate_thre, "w_conv1": self.w_conva, "wkj": self.wkj, "vji": self.vji, "thre_conv1": self.thre_conva, "thre_bp2": self.thre_bpa, "thre_bp3": self.thre_bpa, } with open(lowerCamelCase , "wb" ) as f: pickle.dump(lowerCamelCase , lowerCamelCase ) print(F"Model saved: {save_path}" ) @classmethod def a__ ( cls , lowerCamelCase ): # read saved model with open(lowerCamelCase , "rb" ) as f: __a = pickle.load(lowerCamelCase ) # noqa: S301 __a = model_dic.get("conv1" ) conv_get.append(model_dic.get("step_conv1" ) ) __a = model_dic.get("size_pooling1" ) __a = model_dic.get("num_bp1" ) __a = model_dic.get("num_bp2" ) __a = model_dic.get("num_bp3" ) __a = model_dic.get("rate_weight" ) __a = model_dic.get("rate_thre" ) # create model instance __a = CNN(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) # modify model parameter __a = model_dic.get("w_conv1" ) __a = model_dic.get("wkj" ) __a = model_dic.get("vji" ) __a = model_dic.get("thre_conv1" ) __a = model_dic.get("thre_bp2" ) __a = model_dic.get("thre_bp3" ) return conv_ins def a__ ( self , lowerCamelCase ): return 1 / (1 + np.exp(-1 * x )) def a__ ( self , lowerCamelCase ): return round(lowerCamelCase , 3 ) def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): # convolution process __a = convs[0] __a = convs[1] __a = np.shape(lowerCamelCase )[0] # get the data slice of original image data, data_focus __a = [] for i_focus in range(0 , size_data - size_conv + 1 , lowerCamelCase ): for j_focus in range(0 , size_data - size_conv + 1 , lowerCamelCase ): __a = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(lowerCamelCase ) # calculate the feature map of every single kernel, and saved as list of matrix __a = [] __a = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(lowerCamelCase ): __a = [] for i_focus in range(len(lowerCamelCase ) ): __a = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(lowerCamelCase ) ) __a = np.asmatrix(lowerCamelCase ).reshape( lowerCamelCase , lowerCamelCase ) data_featuremap.append(lowerCamelCase ) # expanding the data slice to One dimenssion __a = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(lowerCamelCase ) ) __a = np.asarray(lowerCamelCase ) return focus_list, data_featuremap def a__ ( self , lowerCamelCase , lowerCamelCase , 
lowerCamelCase="average_pool" ): # pooling process __a = len(featuremaps[0] ) __a = int(size_map / size_pooling ) __a = [] for i_map in range(len(lowerCamelCase ) ): __a = featuremaps[i_map] __a = [] for i_focus in range(0 , lowerCamelCase , lowerCamelCase ): for j_focus in range(0 , lowerCamelCase , lowerCamelCase ): __a = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(lowerCamelCase ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(lowerCamelCase ) ) __a = np.asmatrix(lowerCamelCase ).reshape(lowerCamelCase , lowerCamelCase ) featuremap_pooled.append(lowerCamelCase ) return featuremap_pooled def a__ ( self , lowerCamelCase ): # expanding three dimension data to one dimension list __a = [] for i in range(len(lowerCamelCase ) ): __a = np.shape(data[i] ) __a = data[i].reshape(1 , shapes[0] * shapes[1] ) __a = data_listed.getA().tolist()[0] data_expanded.extend(lowerCamelCase ) __a = np.asarray(lowerCamelCase ) return data_expanded def a__ ( self , lowerCamelCase ): # expanding matrix to one dimension list __a = np.asarray(lowerCamelCase ) __a = np.shape(lowerCamelCase ) __a = data_mat.reshape(1 , shapes[0] * shapes[1] ) return data_expanded def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): __a = [] __a = 0 for i_map in range(lowerCamelCase ): __a = np.ones((size_map, size_map) ) for i in range(0 , lowerCamelCase , lowerCamelCase ): for j in range(0 , lowerCamelCase , lowerCamelCase ): __a = pd_pool[ i_pool ] __a = i_pool + 1 __a = np.multiply( lowerCamelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) ) pd_all.append(lowerCamelCase ) return pd_all def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=bool ): # model traning print("----------------------Start Training-------------------------" ) print((" - - Shape: Train_Data ", np.shape(lowerCamelCase )) ) print((" - - Shape: Teach_Data ", np.shape(lowerCamelCase )) ) __a = 0 __a = [] __a = 10000 while rp < n_repeat and mse >= error_accuracy: __a = 0 print(F"-------------Learning Time {rp}--------------" ) for p in range(len(lowerCamelCase ) ): # print('------------Learning Image: %d--------------'%p) __a = np.asmatrix(datas_train[p] ) __a = np.asarray(datas_teach[p] ) __a , __a = self.convolute( lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) __a = self.pooling(lowerCamelCase , self.size_poolinga ) __a = np.shape(lowerCamelCase ) __a = self._expand(lowerCamelCase ) __a = data_bp_input __a = np.dot(lowerCamelCase , self.vji.T ) - self.thre_bpa __a = self.sig(lowerCamelCase ) __a = np.dot(lowerCamelCase , self.wkj.T ) - self.thre_bpa __a = self.sig(lowerCamelCase ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- __a = np.multiply( (data_teach - bp_outa) , np.multiply(lowerCamelCase , (1 - bp_outa) ) ) __a = np.multiply( np.dot(lowerCamelCase , self.wkj ) , np.multiply(lowerCamelCase , (1 - bp_outa) ) ) __a = np.dot(lowerCamelCase , self.vji ) __a = pd_i_all / (self.size_poolinga * self.size_poolinga) __a = pd_conva_pooled.T.getA().tolist() __a = self._calculate_gradient_from_pool( lowerCamelCase , lowerCamelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in 
range(self.conva[1] ): __a = self._expand_mat(pd_conva_all[k_conv] ) __a = self.rate_weight * np.dot(lowerCamelCase , lowerCamelCase ) __a = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) __a = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer __a = self.wkj + pd_k_all.T * bp_outa * self.rate_weight __a = self.vji + pd_j_all.T * bp_outa * self.rate_weight __a = self.thre_bpa - pd_k_all * self.rate_thre __a = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image __a = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) __a = rp + 1 __a = error_count / patterns all_mse.append(lowerCamelCase ) def draw_error(): __a = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(lowerCamelCase , "+-" ) plt.plot(lowerCamelCase , "r--" ) plt.xlabel("Learning Times" ) plt.ylabel("All_mse" ) plt.grid(lowerCamelCase , alpha=0.5 ) plt.show() print("------------------Training Complished---------------------" ) print((" - - Training epoch: ", rp, F" - - Mse: {mse:.6f}") ) if draw_e: draw_error() return mse def a__ ( self , lowerCamelCase ): # model predict __a = [] print("-------------------Start Testing-------------------------" ) print((" - - Shape: Test_Data ", np.shape(lowerCamelCase )) ) for p in range(len(lowerCamelCase ) ): __a = np.asmatrix(datas_test[p] ) __a , __a = self.convolute( lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) __a = self.pooling(lowerCamelCase , self.size_poolinga ) __a = self._expand(lowerCamelCase ) __a = data_bp_input __a = bp_outa * self.vji.T - self.thre_bpa __a = self.sig(lowerCamelCase ) __a = bp_outa * self.wkj.T - self.thre_bpa __a = self.sig(lowerCamelCase ) produce_out.extend(bp_outa.getA().tolist() ) __a = [list(map(self.do_round , lowerCamelCase ) ) for each in produce_out] return np.asarray(lowerCamelCase ) def a__ ( self , lowerCamelCase ): # return the data of image after convoluting process so we can check it out __a = np.asmatrix(lowerCamelCase ) __a , __a = self.convolute( lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) __a = self.pooling(lowerCamelCase , self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
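# A self-contained sketch, not part of the original class, of the average
# pooling step the CNN above performs on its feature maps: tile the map into
# size_pooling x size_pooling windows and average each window.
import numpy as np


def average_pool(feature_map: np.ndarray, size_pooling: int) -> np.ndarray:
    out = feature_map.shape[0] // size_pooling
    pooled = [
        np.average(feature_map[i : i + size_pooling, j : j + size_pooling])
        for i in range(0, out * size_pooling, size_pooling)
        for j in range(0, out * size_pooling, size_pooling)
    ]
    return np.asarray(pooled).reshape(out, out)


print(average_pool(np.arange(16.0).reshape(4, 4), 2))  # [[ 2.5  4.5] [10.5 12.5]]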
261
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available SCREAMING_SNAKE_CASE__:List[str] = {"""configuration_van""": ["""VAN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VanConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__:Optional[Any] = [ """VAN_PRETRAINED_MODEL_ARCHIVE_LIST""", """VanForImageClassification""", """VanModel""", """VanPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_van import ( VAN_PRETRAINED_MODEL_ARCHIVE_LIST, VanForImageClassification, VanModel, VanPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__:Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
261
1
"""simple docstring""" from typing import Optional from urllib.parse import quote import huggingface_hub as hfh from packaging import version def _lowerCamelCase( a , a , a = None ): if version.parse(hfh.__version__ ).release < version.parse("0.11.0" ).release: # old versions of hfh don't url-encode the file path __a = quote(a ) return hfh.hf_hub_url(a , a , repo_type="dataset" , revision=a )
261
"""simple docstring""" from __future__ import annotations from typing import Any class snake_case__ : def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = 0 ): __a , __a = row, column __a = [[default_value for c in range(lowerCamelCase )] for r in range(lowerCamelCase )] def __str__( self ): __a = F"Matrix consist of {self.row} rows and {self.column} columns\n" # Make string identifier __a = 0 for row_vector in self.array: for obj in row_vector: __a = max(lowerCamelCase , len(str(lowerCamelCase ) ) ) __a = F"%{max_element_length}s" # Make string and return def single_line(lowerCamelCase ) -> str: nonlocal string_format_identifier __a = "[" line += ", ".join(string_format_identifier % (obj,) for obj in row_vector ) line += "]" return line s += "\n".join(single_line(lowerCamelCase ) for row_vector in self.array ) return s def __repr__( self ): return str(self ) def a__ ( self , lowerCamelCase ): if not (isinstance(lowerCamelCase , (list, tuple) ) and len(lowerCamelCase ) == 2): return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False else: return True def __getitem__( self , lowerCamelCase ): assert self.validate_indicies(lowerCamelCase ) return self.array[loc[0]][loc[1]] def __setitem__( self , lowerCamelCase , lowerCamelCase ): assert self.validate_indicies(lowerCamelCase ) __a = value def __add__( self , lowerCamelCase ): assert isinstance(lowerCamelCase , lowerCamelCase ) assert self.row == another.row and self.column == another.column # Add __a = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __a = self[r, c] + another[r, c] return result def __neg__( self ): __a = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __a = -self[r, c] return result def __sub__( self , lowerCamelCase ): return self + (-another) def __mul__( self , lowerCamelCase ): if isinstance(lowerCamelCase , (int, float) ): # Scalar multiplication __a = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __a = self[r, c] * another return result elif isinstance(lowerCamelCase , lowerCamelCase ): # Matrix multiplication assert self.column == another.row __a = Matrix(self.row , another.column ) for r in range(self.row ): for c in range(another.column ): for i in range(self.column ): result[r, c] += self[r, i] * another[i, c] return result else: __a = F"Unsupported type given for another ({type(lowerCamelCase )})" raise TypeError(lowerCamelCase ) def a__ ( self ): __a = Matrix(self.column , self.row ) for r in range(self.row ): for c in range(self.column ): __a = self[r, c] return result def a__ ( self , lowerCamelCase , lowerCamelCase ): assert isinstance(lowerCamelCase , lowerCamelCase ) and isinstance(lowerCamelCase , lowerCamelCase ) assert self.row == self.column == u.row == v.row # u, v should be column vector assert u.column == v.column == 1 # u, v should be column vector # Calculate __a = v.transpose() __a = (v_t * self * u)[0, 0] + 1 if numerator_factor == 0: return None # It's not invertable return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor)) # Testing if __name__ == "__main__": def _lowerCamelCase( ): # a^(-1) __a = Matrix(3 , 3 , 0 ) for i in range(3 ): __a = 1 print(F"a^(-1) is {ainv}" ) # u, v __a = Matrix(3 , 1 , 0 ) __a , __a , __a = 1, 2, -3 __a = Matrix(3 , 1 , 0 ) __a , __a , __a = 4, -2, 5 print(F"u is {u}" ) print(F"v is {v}" ) print(F"uv^T is {u * v.transpose()}" ) # Sherman Morrison print(F"(a + uv^T)^(-1) is 
{ainv.sherman_morrison(a , a )}" ) def _lowerCamelCase( ): import doctest doctest.testmod() testa()
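# A numeric check of the Sherman-Morrison update above, not part of the
# original module. The update (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1))
# / (1 + v^T A^(-1) u) is valid exactly when the denominator is nonzero, and
# multiplying the result by (A + u v^T) should give the identity. With A = I
# and the same u, v as in test1(), 1 + v^T u = -14, so the update applies:
def _check_sherman_morrison() -> None:
    ainv = Matrix(3, 3, 0)
    for i in range(3):
        ainv[i, i] = 1
    u = Matrix(3, 1, 0)
    u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
    v = Matrix(3, 1, 0)
    v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
    product = (ainv + u * v.transpose()) * ainv.sherman_morrison(u, v)
    for i in range(3):
        for j in range(3):
            assert abs(product[i, j] - (1.0 if i == j else 0.0)) < 1e-9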
261
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE__:List[Any] = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__:List[str] = ["""FNetTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__:List[Any] = ["""FNetTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__:List[str] = [ """FNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """FNetForMaskedLM""", """FNetForMultipleChoice""", """FNetForNextSentencePrediction""", """FNetForPreTraining""", """FNetForQuestionAnswering""", """FNetForSequenceClassification""", """FNetForTokenClassification""", """FNetLayer""", """FNetModel""", """FNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__:int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
261
"""simple docstring""" import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def _lowerCamelCase( a , a , a , a , a=True , a="pt" ): __a = {"add_prefix_space": True} if isinstance(a , a ) and not line.startswith(" " ) else {} __a = padding_side return tokenizer( [line] , max_length=a , padding="max_length" if pad_to_max_length else None , truncation=a , return_tensors=a , add_special_tokens=a , **a , ) def _lowerCamelCase( a , a , a=None , ): __a = input_ids.ne(a ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class snake_case__ ( snake_case_ ): def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase="train" , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="" , ): super().__init__() __a = Path(lowerCamelCase ).joinpath(type_path + ".source" ) __a = Path(lowerCamelCase ).joinpath(type_path + ".target" ) __a = self.get_char_lens(self.src_file ) __a = max_source_length __a = max_target_length assert min(self.src_lens ) > 0, F"found empty line in {self.src_file}" __a = tokenizer __a = prefix if n_obs is not None: __a = self.src_lens[:n_obs] __a = src_lang __a = tgt_lang def __len__( self ): return len(self.src_lens ) def __getitem__( self , lowerCamelCase ): __a = index + 1 # linecache starts at 1 __a = self.prefix + linecache.getline(str(self.src_file ) , lowerCamelCase ).rstrip("\n" ) __a = linecache.getline(str(self.tgt_file ) , lowerCamelCase ).rstrip("\n" ) assert source_line, F"empty source line for index {index}" assert tgt_line, F"empty tgt line for index {index}" # Need to add eos token manually for T5 if isinstance(self.tokenizer , lowerCamelCase ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right __a = ( self.tokenizer.question_encoder if isinstance(self.tokenizer , lowerCamelCase ) else self.tokenizer ) __a = self.tokenizer.generator if isinstance(self.tokenizer , lowerCamelCase ) else self.tokenizer __a = encode_line(lowerCamelCase , lowerCamelCase , self.max_source_length , "right" ) __a = encode_line(lowerCamelCase , lowerCamelCase , self.max_target_length , "right" ) __a = source_inputs["input_ids"].squeeze() __a = target_inputs["input_ids"].squeeze() __a = source_inputs["attention_mask"].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def a__ ( lowerCamelCase ): return [len(lowerCamelCase ) for x in Path(lowerCamelCase ).open().readlines()] def a__ ( self , lowerCamelCase ): __a = torch.stack([x["input_ids"] for x in batch] ) __a = torch.stack([x["attention_mask"] for x in batch] ) __a = torch.stack([x["decoder_input_ids"] for x in batch] ) __a = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer , lowerCamelCase ) else self.tokenizer.pad_token_id ) __a = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer , lowerCamelCase ) else self.tokenizer.pad_token_id ) __a = trim_batch(lowerCamelCase , lowerCamelCase ) __a , __a = trim_batch(lowerCamelCase , lowerCamelCase , attention_mask=lowerCamelCase ) 
__a = { "input_ids": source_ids, "attention_mask": source_mask, "decoder_input_ids": y, } return batch SCREAMING_SNAKE_CASE__:Tuple = getLogger(__name__) def _lowerCamelCase( a ): return list(itertools.chain.from_iterable(a ) ) def _lowerCamelCase( a ): __a = get_git_info() save_json(a , os.path.join(a , "git_log.json" ) ) def _lowerCamelCase( a , a , a=4 , **a ): with open(a , "w" ) as f: json.dump(a , a , indent=a , **a ) def _lowerCamelCase( a ): with open(a ) as f: return json.load(a ) def _lowerCamelCase( ): __a = git.Repo(search_parent_directories=a ) __a = { "repo_id": str(a ), "repo_sha": str(repo.head.object.hexsha ), "repo_branch": str(repo.active_branch ), "hostname": str(socket.gethostname() ), } return repo_infos def _lowerCamelCase( a , a ): return list(map(a , a ) ) def _lowerCamelCase( a , a ): with open(a , "wb" ) as f: return pickle.dump(a , a ) def _lowerCamelCase( a ): def remove_articles(a ): return re.sub(R"\b(a|an|the)\b" , " " , a ) def white_space_fix(a ): return " ".join(text.split() ) def remove_punc(a ): __a = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(a ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(a ) ) ) ) def _lowerCamelCase( a , a ): __a = normalize_answer(a ).split() __a = normalize_answer(a ).split() __a = Counter(a ) & Counter(a ) __a = sum(common.values() ) if num_same == 0: return 0 __a = 1.0 * num_same / len(a ) __a = 1.0 * num_same / len(a ) __a = (2 * precision * recall) / (precision + recall) return fa def _lowerCamelCase( a , a ): return normalize_answer(a ) == normalize_answer(a ) def _lowerCamelCase( a , a ): assert len(a ) == len(a ) __a = 0 for hypo, pred in zip(a , a ): em += exact_match_score(a , a ) if len(a ) > 0: em /= len(a ) return {"em": em} def _lowerCamelCase( a ): return model_prefix.startswith("rag" ) def _lowerCamelCase( a , a , a ): __a = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead __a = "dropout_rate" for p in extra_params: if getattr(a , a , a ): if not hasattr(a , a ) and not hasattr(a , equivalent_param[p] ): logger.info("config doesn't have a `{}` attribute".format(a ) ) delattr(a , a ) continue __a = p if hasattr(a , a ) else equivalent_param[p] setattr(a , a , getattr(a , a ) ) delattr(a , a ) return hparams, config
261
1
"""simple docstring""" import unittest from dataclasses import dataclass import pytest from accelerate.commands.config.config_args import SageMakerConfig from accelerate.utils import ComputeEnvironment from accelerate.utils.launch import _convert_nargs_to_dict @dataclass class snake_case__ ( snake_case_ ): _snake_case : int = ComputeEnvironment.AMAZON_SAGEMAKER _snake_case : Dict = True _snake_case : Optional[Any] = """ml.p3.2xlarge""" _snake_case : int = """accelerate_sagemaker_execution_role""" _snake_case : Optional[Any] = """hf-sm""" _snake_case : Tuple = """us-east-1""" _snake_case : List[str] = 1 _snake_case : int = """accelerate-sagemaker-1""" _snake_case : Dict = """1.6""" _snake_case : Union[str, Any] = """4.4""" _snake_case : Tuple = """train.py""" _snake_case : Tuple = [ """--model_name_or_path""", """bert""", """--do_train""", """False""", """--epochs""", """3""", """--learning_rate""", """5e-5""", """--max_steps""", """50.5""", ] _snake_case : Union[str, Any] = [ """--model_name_or_path""", """bert""", """--do_train""", """--do_test""", """False""", """--do_predict""", """--epochs""", """3""", """--learning_rate""", """5e-5""", """--max_steps""", """50.5""", ] class snake_case__ ( unittest.TestCase ): def a__ ( self ): # If no defaults are changed, `to_kwargs` returns an empty dict. __a = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args ) assert isinstance(converted_args["model_name_or_path"] , lowerCamelCase ) assert isinstance(converted_args["do_train"] , lowerCamelCase ) assert isinstance(converted_args["epochs"] , lowerCamelCase ) assert isinstance(converted_args["learning_rate"] , lowerCamelCase ) assert isinstance(converted_args["max_steps"] , lowerCamelCase ) with pytest.raises(lowerCamelCase ): _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
261
"""simple docstring""" from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .attention_processor import AttentionProcessor, AttnProcessor from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder @dataclass class snake_case__ ( snake_case_ ): _snake_case : "DiagonalGaussianDistribution" class snake_case__ ( snake_case_, snake_case_ ): _snake_case : Optional[Any] = True @register_to_config def __init__( self , lowerCamelCase = 3 , lowerCamelCase = 3 , lowerCamelCase = ("DownEncoderBlock2D",) , lowerCamelCase = ("UpDecoderBlock2D",) , lowerCamelCase = (64,) , lowerCamelCase = 1 , lowerCamelCase = "silu" , lowerCamelCase = 4 , lowerCamelCase = 32 , lowerCamelCase = 32 , lowerCamelCase = 0.1_8215 , ): super().__init__() # pass init params to Encoder __a = Encoder( in_channels=lowerCamelCase , out_channels=lowerCamelCase , down_block_types=lowerCamelCase , block_out_channels=lowerCamelCase , layers_per_block=lowerCamelCase , act_fn=lowerCamelCase , norm_num_groups=lowerCamelCase , double_z=lowerCamelCase , ) # pass init params to Decoder __a = Decoder( in_channels=lowerCamelCase , out_channels=lowerCamelCase , up_block_types=lowerCamelCase , block_out_channels=lowerCamelCase , layers_per_block=lowerCamelCase , norm_num_groups=lowerCamelCase , act_fn=lowerCamelCase , ) __a = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 ) __a = nn.Convad(lowerCamelCase , lowerCamelCase , 1 ) __a = False __a = False # only relevant if vae tiling is enabled __a = self.config.sample_size __a = ( self.config.sample_size[0] if isinstance(self.config.sample_size , (list, tuple) ) else self.config.sample_size ) __a = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) ) __a = 0.25 def a__ ( self , lowerCamelCase , lowerCamelCase=False ): if isinstance(lowerCamelCase , (Encoder, Decoder) ): __a = value def a__ ( self , lowerCamelCase = True ): __a = use_tiling def a__ ( self ): self.enable_tiling(lowerCamelCase ) def a__ ( self ): __a = True def a__ ( self ): __a = False @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def a__ ( self ): __a = {} def fn_recursive_add_processors(lowerCamelCase , lowerCamelCase , lowerCamelCase ): if hasattr(lowerCamelCase , "set_processor" ): __a = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(F"{name}.{sub_name}" , lowerCamelCase , lowerCamelCase ) return processors for name, module in self.named_children(): fn_recursive_add_processors(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return processors def a__ ( self , lowerCamelCase ): __a = len(self.attn_processors.keys() ) if isinstance(lowerCamelCase , lowerCamelCase ) and len(lowerCamelCase ) != count: raise ValueError( F"A dict of processors was passed, but the number of processors {len(lowerCamelCase )} does not match the" F" number of attention layers: {count}. Please make sure to pass {count} processor classes." 
) def fn_recursive_attn_processor(lowerCamelCase , lowerCamelCase , lowerCamelCase ): if hasattr(lowerCamelCase , "set_processor" ): if not isinstance(lowerCamelCase , lowerCamelCase ): module.set_processor(lowerCamelCase ) else: module.set_processor(processor.pop(F"{name}.processor" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(F"{name}.{sub_name}" , lowerCamelCase , lowerCamelCase ) for name, module in self.named_children(): fn_recursive_attn_processor(lowerCamelCase , lowerCamelCase , lowerCamelCase ) def a__ ( self ): self.set_attn_processor(AttnProcessor() ) @apply_forward_hook def a__ ( self , lowerCamelCase , lowerCamelCase = True ): if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): return self.tiled_encode(lowerCamelCase , return_dict=lowerCamelCase ) if self.use_slicing and x.shape[0] > 1: __a = [self.encoder(lowerCamelCase ) for x_slice in x.split(1 )] __a = torch.cat(lowerCamelCase ) else: __a = self.encoder(lowerCamelCase ) __a = self.quant_conv(lowerCamelCase ) __a = DiagonalGaussianDistribution(lowerCamelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase = True ): if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): return self.tiled_decode(lowerCamelCase , return_dict=lowerCamelCase ) __a = self.post_quant_conv(lowerCamelCase ) __a = self.decoder(lowerCamelCase ) if not return_dict: return (dec,) return DecoderOutput(sample=lowerCamelCase ) @apply_forward_hook def a__ ( self , lowerCamelCase , lowerCamelCase = True ): if self.use_slicing and z.shape[0] > 1: __a = [self._decode(lowerCamelCase ).sample for z_slice in z.split(1 )] __a = torch.cat(lowerCamelCase ) else: __a = self._decode(lowerCamelCase ).sample if not return_dict: return (decoded,) return DecoderOutput(sample=lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): __a = min(a.shape[2] , b.shape[2] , lowerCamelCase ) for y in range(lowerCamelCase ): __a = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): __a = min(a.shape[3] , b.shape[3] , lowerCamelCase ) for x in range(lowerCamelCase ): __a = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def a__ ( self , lowerCamelCase , lowerCamelCase = True ): __a = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) ) __a = int(self.tile_latent_min_size * self.tile_overlap_factor ) __a = self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and encode them separately. 
__a = [] for i in range(0 , x.shape[2] , lowerCamelCase ): __a = [] for j in range(0 , x.shape[3] , lowerCamelCase ): __a = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] __a = self.encoder(lowerCamelCase ) __a = self.quant_conv(lowerCamelCase ) row.append(lowerCamelCase ) rows.append(lowerCamelCase ) __a = [] for i, row in enumerate(lowerCamelCase ): __a = [] for j, tile in enumerate(lowerCamelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __a = self.blend_v(rows[i - 1][j] , lowerCamelCase , lowerCamelCase ) if j > 0: __a = self.blend_h(row[j - 1] , lowerCamelCase , lowerCamelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(lowerCamelCase , dim=3 ) ) __a = torch.cat(lowerCamelCase , dim=2 ) __a = DiagonalGaussianDistribution(lowerCamelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase = True ): __a = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) ) __a = int(self.tile_sample_min_size * self.tile_overlap_factor ) __a = self.tile_sample_min_size - blend_extent # Split z into overlapping 64x64 tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. __a = [] for i in range(0 , z.shape[2] , lowerCamelCase ): __a = [] for j in range(0 , z.shape[3] , lowerCamelCase ): __a = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] __a = self.post_quant_conv(lowerCamelCase ) __a = self.decoder(lowerCamelCase ) row.append(lowerCamelCase ) rows.append(lowerCamelCase ) __a = [] for i, row in enumerate(lowerCamelCase ): __a = [] for j, tile in enumerate(lowerCamelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __a = self.blend_v(rows[i - 1][j] , lowerCamelCase , lowerCamelCase ) if j > 0: __a = self.blend_h(row[j - 1] , lowerCamelCase , lowerCamelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(lowerCamelCase , dim=3 ) ) __a = torch.cat(lowerCamelCase , dim=2 ) if not return_dict: return (dec,) return DecoderOutput(sample=lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = True , lowerCamelCase = None , ): __a = sample __a = self.encode(lowerCamelCase ).latent_dist if sample_posterior: __a = posterior.sample(generator=lowerCamelCase ) else: __a = posterior.mode() __a = self.decode(lowerCamelCase ).sample if not return_dict: return (dec,) return DecoderOutput(sample=lowerCamelCase )
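# A round trip through the public diffusers class with the same design as the
# autoencoder above; a sketch, not part of the original file. The tiny
# configuration is an assumption for demonstration, and with a single down
# block there is no spatial downsampling.
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL(
    in_channels=3,
    out_channels=3,
    down_block_types=("DownEncoderBlock2D",),
    up_block_types=("UpDecoderBlock2D",),
    block_out_channels=(32,),
    latent_channels=4,
)
x = torch.randn(1, 3, 32, 32)
z = vae.encode(x).latent_dist.sample()
reconstruction = vae.decode(z).sample
print(z.shape, reconstruction.shape)  # latent and reconstruction shapes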
261
1
"""simple docstring""" import os import unittest from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer from transformers.testing_utils import require_jieba, tooslow from ...test_tokenization_common import TokenizerTesterMixin @require_jieba class snake_case__ ( snake_case_, unittest.TestCase ): _snake_case : Any = CpmAntTokenizer _snake_case : Optional[Any] = False def a__ ( self ): super().setUp() __a = [ "<d>", "</d>", "<s>", "</s>", "</_>", "<unk>", "<pad>", "</n>", "我", "是", "C", "P", "M", "A", "n", "t", ] __a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) @tooslow def a__ ( self ): __a = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b" ) __a = "今天天气真好!" __a = ["今天", "天气", "真", "好", "!"] __a = tokenizer.tokenize(lowerCamelCase ) self.assertListEqual(lowerCamelCase , lowerCamelCase ) __a = "今天天气真好!" __a = [tokenizer.bos_token] + tokens __a = [6, 9802, 14962, 2082, 831, 244] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ) , lowerCamelCase ) __a = tokenizer.decode(lowerCamelCase ) self.assertEqual(lowerCamelCase , lowerCamelCase )
261
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy SCREAMING_SNAKE_CASE__:List[Any] = logging.get_logger(__name__) class snake_case__ ( snake_case_ ): def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ): __a = feature_size __a = sampling_rate __a = padding_value __a = kwargs.pop("padding_side" , "right" ) __a = kwargs.pop("return_attention_mask" , lowerCamelCase ) super().__init__(**lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , ): # If we have a list of dicts, let's convert it in a dict of lists # We do this to allow using this method as a collate_fn function in PyTorch Dataloader if isinstance(lowerCamelCase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ): __a = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`" F" to this method that includes {self.model_input_names[0]}, but you provided" F" {list(processed_features.keys() )}" ) __a = processed_features[self.model_input_names[0]] __a = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(lowerCamelCase ) == 0: if return_attention_mask: __a = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch __a = required_input[0] if isinstance(lowerCamelCase , (list, tuple) ): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. __a = 0 while len(required_input[index] ) == 0: index += 1 if index < len(lowerCamelCase ): __a = required_input[index][0] if return_tensors is None: if is_tf_tensor(lowerCamelCase ): __a = "tf" elif is_torch_tensor(lowerCamelCase ): __a = "pt" elif isinstance(lowerCamelCase , (int, float, list, tuple, np.ndarray) ): __a = "np" else: raise ValueError( F"type of {first_element} unknown: {type(lowerCamelCase )}. " "Should be one of a python, numpy, pytorch or tensorflow object." ) for key, value in processed_features.items(): if isinstance(value[0] , (int, float) ): __a = to_numpy(lowerCamelCase ) else: __a = [to_numpy(lowerCamelCase ) for v in value] # Convert padding_strategy in PaddingStrategy __a = self._get_padding_strategies(padding=lowerCamelCase , max_length=lowerCamelCase ) __a = processed_features[self.model_input_names[0]] __a = len(lowerCamelCase ) if not all(len(lowerCamelCase ) == batch_size for v in processed_features.values() ): raise ValueError("Some items in the output dictionary have a different batch size than others." 
) __a = [] for i in range(lowerCamelCase ): __a = {k: v[i] for k, v in processed_features.items()} # truncation __a = self._truncate( lowerCamelCase , max_length=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , truncation=lowerCamelCase , ) truncated_inputs.append(lowerCamelCase ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length __a = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) __a = PaddingStrategy.MAX_LENGTH __a = {} for i in range(lowerCamelCase ): # padding __a = self._pad( truncated_inputs[i] , max_length=lowerCamelCase , padding_strategy=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , ) for key, value in outputs.items(): if key not in batch_outputs: __a = [] if value.dtype is np.dtype(np.floataa ): __a = value.astype(np.floataa ) batch_outputs[key].append(lowerCamelCase ) return BatchFeature(lowerCamelCase , tensor_type=lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = PaddingStrategy.DO_NOT_PAD , lowerCamelCase = None , lowerCamelCase = None , ): __a = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: __a = len(lowerCamelCase ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): __a = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of __a = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowerCamelCase ) < max_length if return_attention_mask and "attention_mask" not in processed_features: __a = np.ones(len(lowerCamelCase ) , dtype=np.intaa ) if needs_to_be_padded: __a = max_length - len(lowerCamelCase ) if self.padding_side == "right": if return_attention_mask: __a = np.pad( processed_features["attention_mask"] , (0, difference) ) __a = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) __a = np.pad( lowerCamelCase , lowerCamelCase , "constant" , constant_values=self.padding_value ) elif self.padding_side == "left": if return_attention_mask: __a = np.pad( processed_features["attention_mask"] , (difference, 0) ) __a = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) __a = np.pad( lowerCamelCase , lowerCamelCase , "constant" , constant_values=self.padding_value ) else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return processed_features def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , ): if not truncation: return processed_features elif truncation and max_length is None: raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." 
) __a = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): __a = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of __a = len(lowerCamelCase ) > max_length if needs_to_be_truncated: __a = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: __a = processed_features["attention_mask"][:max_length] return processed_features def a__ ( self , lowerCamelCase=False , lowerCamelCase=None ): # Get padding strategy if padding is not False: if padding is True: __a = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(lowerCamelCase , lowerCamelCase ): __a = PaddingStrategy(lowerCamelCase ) elif isinstance(lowerCamelCase , lowerCamelCase ): __a = padding else: __a = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( F"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined" ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use" " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." ) return padding_strategy
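# --- Illustrative sketch of the _pad branch above for 1-D features (an
# assumption; real inputs may also be 2-D): right padding appends
# `difference` copies of padding_value and extends the attention mask with
# zeros, left padding prepends them instead.
import numpy as np


def pad_1d(values, max_length, padding_value=0.0, padding_side="right"):
    attention_mask = np.ones(len(values), dtype=np.int32)
    difference = max_length - len(values)
    if difference <= 0:
        return np.asarray(values), attention_mask
    if padding_side == "right":
        padded = np.pad(values, (0, difference), "constant", constant_values=padding_value)
        mask = np.pad(attention_mask, (0, difference))
    else:
        padded = np.pad(values, (difference, 0), "constant", constant_values=padding_value)
        mask = np.pad(attention_mask, (difference, 0))
    return padded, mask


# pad_1d([1.0, 2.0], 4) -> (array([1., 2., 0., 0.]), array([1, 1, 0, 0]))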
261
1
"""simple docstring""" import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__:Tuple = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__:Union[str, Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} # See all BART models at https://huggingface.co/models?filter=bart SCREAMING_SNAKE_CASE__:List[Any] = { """vocab_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""", }, """merges_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""", }, } SCREAMING_SNAKE_CASE__:List[Any] = { """facebook/bart-base""": 1024, """facebook/bart-large""": 1024, """facebook/bart-large-mnli""": 1024, """facebook/bart-large-cnn""": 1024, """facebook/bart-large-xsum""": 1024, """yjernite/bart_eli5""": 1024, } @lru_cache() def _lowerCamelCase( ): __a = ( list(range(ord("!" 
) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) __a = bs[:] __a = 0 for b in range(2**8 ): if b not in bs: bs.append(a ) cs.append(2**8 + n ) n += 1 __a = [chr(a ) for n in cs] return dict(zip(a , a ) ) def _lowerCamelCase( a ): __a = set() __a = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __a = char return pairs class snake_case__ ( snake_case_ ): _snake_case : Union[str, Any] = VOCAB_FILES_NAMES _snake_case : List[str] = PRETRAINED_VOCAB_FILES_MAP _snake_case : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case : Any = ["""input_ids""", """attention_mask"""] def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase="replace" , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="</s>" , lowerCamelCase="<s>" , lowerCamelCase="<unk>" , lowerCamelCase="<pad>" , lowerCamelCase="<mask>" , lowerCamelCase=False , **lowerCamelCase , ): __a = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else bos_token __a = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else eos_token __a = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else sep_token __a = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else cls_token __a = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else unk_token __a = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it __a = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token super().__init__( errors=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , add_prefix_space=lowerCamelCase , **lowerCamelCase , ) with open(lowerCamelCase , encoding="utf-8" ) as vocab_handle: __a = json.load(lowerCamelCase ) __a = {v: k for k, v in self.encoder.items()} __a = errors # how to handle errors in decoding __a = bytes_to_unicode() __a = {v: k for k, v in self.byte_encoder.items()} with open(lowerCamelCase , encoding="utf-8" ) as merges_handle: __a = merges_handle.read().split("\n" )[1:-1] __a = [tuple(merge.split() ) for merge in bpe_merges] __a = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) ) __a = {} __a = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __a = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property def a__ ( self ): return len(self.encoder ) def a__ ( self ): return dict(self.encoder , **self.added_tokens_encoder ) def a__ ( self , lowerCamelCase ): if token in self.cache: return self.cache[token] __a = tuple(lowerCamelCase ) __a = get_pairs(lowerCamelCase ) if not pairs: return token while True: __a = min(lowerCamelCase , key=lambda lowerCamelCase : self.bpe_ranks.get(lowerCamelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break __a , __a = bigram __a = [] __a = 0 while i < len(lowerCamelCase ): try: __a = word.index(lowerCamelCase , lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __a = j if word[i] == first and i < len(lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __a = tuple(lowerCamelCase ) __a = new_word if len(lowerCamelCase ) == 1: break else: __a = get_pairs(lowerCamelCase ) __a = " ".join(lowerCamelCase ) __a = word return word def a__ ( self , lowerCamelCase ): __a = [] for token in re.findall(self.pat , lowerCamelCase ): __a = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase ).split(" " ) ) return bpe_tokens def a__ ( self , lowerCamelCase ): return self.encoder.get(lowerCamelCase , self.encoder.get(self.unk_token ) ) def a__ ( self , lowerCamelCase ): return self.decoder.get(lowerCamelCase ) def a__ ( self , lowerCamelCase ): __a = "".join(lowerCamelCase ) __a = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def a__ ( self , lowerCamelCase , lowerCamelCase = None ): if not os.path.isdir(lowerCamelCase ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return __a = os.path.join( lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) __a = os.path.join( lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase , ensure_ascii=lowerCamelCase ) + "\n" ) __a = 0 with open(lowerCamelCase 
, "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase : kv[1] ): if index != token_index: logger.warning( F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" ) __a = token_index writer.write(" ".join(lowerCamelCase ) + "\n" ) index += 1 return vocab_file, merge_file def a__ ( self , lowerCamelCase , lowerCamelCase = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __a = [self.cls_token_id] __a = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase )) + [1] return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1] def a__ ( self , lowerCamelCase , lowerCamelCase = None ): __a = [self.sep_token_id] __a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def a__ ( self , lowerCamelCase , lowerCamelCase=False , **lowerCamelCase ): __a = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase ) > 0 and not text[0].isspace()): __a = " " + text return (text, kwargs)
261
"""simple docstring""" from collections import Counter from timeit import timeit def _lowerCamelCase( a = "" , ): return sum(c % 2 for c in Counter(input_str.replace(" " , "" ).lower() ).values() ) < 2 def _lowerCamelCase( a = "" ): if len(a ) == 0: return True __a = input_str.replace(" " , "" ).lower() # character_freq_dict: Stores the frequency of every character in the input string __a = {} for character in lower_case_input_str: __a = character_freq_dict.get(a , 0 ) + 1 __a = 0 for character_count in character_freq_dict.values(): if character_count % 2: odd_char += 1 if odd_char > 1: return False return True def _lowerCamelCase( a = "" ): print("\nFor string = " , a , ":" ) print( "> can_string_be_rearranged_as_palindrome_counter()" , "\tans =" , can_string_be_rearranged_as_palindrome_counter(a ) , "\ttime =" , timeit( "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , ) print( "> can_string_be_rearranged_as_palindrome()" , "\tans =" , can_string_be_rearranged_as_palindrome(a ) , "\ttime =" , timeit( "z.can_string_be_rearranged_as_palindrome(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__:Tuple = input( """Enter string to determine if it can be rearranged as a palindrome or not: """ ).strip() benchmark(check_str) SCREAMING_SNAKE_CASE__:Dict = can_string_be_rearranged_as_palindrome_counter(check_str) print(F'''{check_str} can {'' if status else 'not '}be rearranged as a palindrome''')
261
1
"""simple docstring""" def _lowerCamelCase( a = 1_0_0_0 ): __a = 3 __a = 0 while a < n: if a % 3 == 0 or a % 5 == 0: result += a elif a % 1_5 == 0: result -= a a += 1 return result if __name__ == "__main__": print(F'''{solution() = }''')
261
"""simple docstring""" import itertools import random import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin SCREAMING_SNAKE_CASE__:Any = random.Random() if is_torch_available(): import torch def _lowerCamelCase( a , a=1.0 , a=None , a=None ): if rng is None: __a = global_rng __a = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class snake_case__ ( unittest.TestCase ): def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=400 , lowerCamelCase=2000 , lowerCamelCase=1 , lowerCamelCase=0.0 , lowerCamelCase=16000 , lowerCamelCase=True , lowerCamelCase=True , ): __a = parent __a = batch_size __a = min_seq_length __a = max_seq_length __a = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) __a = feature_size __a = padding_value __a = sampling_rate __a = return_attention_mask __a = do_normalize def a__ ( self ): return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def a__ ( self , lowerCamelCase=False , lowerCamelCase=False ): def _flatten(lowerCamelCase ): return list(itertools.chain(*lowerCamelCase ) ) if equal_length: __a = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size __a = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: __a = [np.asarray(lowerCamelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class snake_case__ ( snake_case_, unittest.TestCase ): _snake_case : str = ASTFeatureExtractor def a__ ( self ): __a = ASTFeatureExtractionTester(self ) def a__ ( self ): # Tests that all call wrap to encode_plus and batch_encode_plus __a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 __a = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __a = [np.asarray(lowerCamelCase ) for speech_input in speech_inputs] # Test not batched input __a = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values __a = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) ) # Test batched __a = feat_extract(lowerCamelCase , padding=lowerCamelCase , return_tensors="np" ).input_values __a = feat_extract(lowerCamelCase , padding=lowerCamelCase , return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ): self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. 
__a = [floats_list((1, x) )[0] for x in (800, 800, 800)] __a = np.asarray(lowerCamelCase ) __a = feat_extract(lowerCamelCase , return_tensors="np" ).input_values __a = feat_extract(lowerCamelCase , return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ): self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) ) @require_torch def a__ ( self ): import torch __a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __a = np.random.rand(100 ).astype(np.floataa ) __a = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: __a = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) __a = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def a__ ( self , lowerCamelCase ): from datasets import load_dataset __a = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" ) # automatic decoding with librispeech __a = ds.sort("id" ).select(range(lowerCamelCase ) )[:num_samples]["audio"] return [x["array"] for x in speech_samples] @require_torch def a__ ( self ): # fmt: off __a = torch.tensor( [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776, -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133, -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936, -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] ) # fmt: on __a = self._load_datasamples(1 ) __a = ASTFeatureExtractor() __a = feature_extractor(lowerCamelCase , return_tensors="pt" ).input_values self.assertEquals(input_values.shape , (1, 1024, 128) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCamelCase , atol=1E-4 ) )
261
1
"""simple docstring""" import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class snake_case__ ( unittest.TestCase ): def a__ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() def a__ ( self ): __a , __a = FlaxStableDiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-2" , revision="bf16" , dtype=jnp.bfloataa , ) __a = "A painting of a squirrel eating a burger" __a = jax.device_count() __a = num_samples * [prompt] __a = sd_pipe.prepare_inputs(lowerCamelCase ) __a = replicate(lowerCamelCase ) __a = shard(lowerCamelCase ) __a = jax.random.PRNGKey(0 ) __a = jax.random.split(lowerCamelCase , jax.device_count() ) __a = sd_pipe(lowerCamelCase , lowerCamelCase , lowerCamelCase , num_inference_steps=25 , jit=lowerCamelCase )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) __a = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __a = images[0, 253:256, 253:256, -1] __a = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __a = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.4_5508, 0.4512] ) print(F"output_slice: {output_slice}" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def a__ ( self ): __a = "stabilityai/stable-diffusion-2" __a , __a = FlaxDPMSolverMultistepScheduler.from_pretrained(lowerCamelCase , subfolder="scheduler" ) __a , __a = FlaxStableDiffusionPipeline.from_pretrained( lowerCamelCase , scheduler=lowerCamelCase , revision="bf16" , dtype=jnp.bfloataa , ) __a = scheduler_params __a = "A painting of a squirrel eating a burger" __a = jax.device_count() __a = num_samples * [prompt] __a = sd_pipe.prepare_inputs(lowerCamelCase ) __a = replicate(lowerCamelCase ) __a = shard(lowerCamelCase ) __a = jax.random.PRNGKey(0 ) __a = jax.random.split(lowerCamelCase , jax.device_count() ) __a = sd_pipe(lowerCamelCase , lowerCamelCase , lowerCamelCase , num_inference_steps=25 , jit=lowerCamelCase )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) __a = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __a = images[0, 253:256, 253:256, -1] __a = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __a = jnp.array([0.4336, 0.4_2969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] ) print(F"output_slice: {output_slice}" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
261
"""simple docstring""" from typing import Optional, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin class snake_case__ ( snake_case_, snake_case_ ): @register_to_config def __init__( self , lowerCamelCase = 768 , ): super().__init__() __a = nn.Parameter(torch.zeros(1 , lowerCamelCase ) ) __a = nn.Parameter(torch.ones(1 , lowerCamelCase ) ) def a__ ( self , lowerCamelCase = None , lowerCamelCase = None , ): __a = nn.Parameter(self.mean.to(lowerCamelCase ).to(lowerCamelCase ) ) __a = nn.Parameter(self.std.to(lowerCamelCase ).to(lowerCamelCase ) ) return self def a__ ( self , lowerCamelCase ): __a = (embeds - self.mean) * 1.0 / self.std return embeds def a__ ( self , lowerCamelCase ): __a = (embeds * self.std) + self.mean return embeds
261
1
"""simple docstring""" import argparse import collections import torch from flax import traverse_util from tax import checkpoints from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def _lowerCamelCase( a , a , a , a="attention" ): __a = params[F"{prefix}/layers_{i}/{layer_name}/key/kernel"] __a = params[F"{prefix}/layers_{i}/{layer_name}/out/kernel"] __a = params[F"{prefix}/layers_{i}/{layer_name}/query/kernel"] __a = params[F"{prefix}/layers_{i}/{layer_name}/value/kernel"] return k, o, q, v def _lowerCamelCase( a , a , a , a=False ): if split_mlp_wi: __a = params[F"{prefix}/layers_{i}/mlp/wi_0/kernel"] __a = params[F"{prefix}/layers_{i}/mlp/wi_1/kernel"] __a = (wi_a, wi_a) else: __a = params[F"{prefix}/layers_{i}/mlp/wi/kernel"] __a = params[F"{prefix}/layers_{i}/mlp/wo/kernel"] return wi, wo def _lowerCamelCase( a , a , a , a ): return params[F"{prefix}/layers_{i}/{layer_name}/scale"] def _lowerCamelCase( a , *, a , a ): __a = traverse_util.flatten_dict(variables["target"] ) __a = {"/".join(a ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi __a = "encoder/layers_0/mlp/wi_0/kernel" in old print("Split MLP:" , a ) __a = collections.OrderedDict() # Shared embeddings. __a = old["token_embedder/embedding"] # Encoder. for i in range(a ): # Block i, layer 0 (Self Attention). __a = tax_layer_norm_lookup(a , a , "encoder" , "pre_attention_layer_norm" ) __a , __a , __a , __a = tax_attention_lookup(a , a , "encoder" , "attention" ) __a = layer_norm __a = k.T __a = o.T __a = q.T __a = v.T # Block i, layer 1 (MLP). __a = tax_layer_norm_lookup(a , a , "encoder" , "pre_mlp_layer_norm" ) __a , __a = tax_mlp_lookup(a , a , "encoder" , a ) __a = layer_norm if split_mlp_wi: __a = wi[0].T __a = wi[1].T else: __a = wi.T __a = wo.T __a = old[ "encoder/relpos_bias/rel_embedding" ].T __a = old["encoder/encoder_norm/scale"] if not is_encoder_only: # Decoder. for i in range(a ): # Block i, layer 0 (Self Attention). __a = tax_layer_norm_lookup(a , a , "decoder" , "pre_self_attention_layer_norm" ) __a , __a , __a , __a = tax_attention_lookup(a , a , "decoder" , "self_attention" ) __a = layer_norm __a = k.T __a = o.T __a = q.T __a = v.T # Block i, layer 1 (Cross Attention). __a = tax_layer_norm_lookup(a , a , "decoder" , "pre_cross_attention_layer_norm" ) __a , __a , __a , __a = tax_attention_lookup(a , a , "decoder" , "encoder_decoder_attention" ) __a = layer_norm __a = k.T __a = o.T __a = q.T __a = v.T # Block i, layer 2 (MLP). __a = tax_layer_norm_lookup(a , a , "decoder" , "pre_mlp_layer_norm" ) __a , __a = tax_mlp_lookup(a , a , "decoder" , a ) __a = layer_norm if split_mlp_wi: __a = wi[0].T __a = wi[1].T else: __a = wi.T __a = wo.T __a = old["decoder/decoder_norm/scale"] __a = old[ "decoder/relpos_bias/rel_embedding" ].T # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: __a = old["decoder/logits_dense/kernel"].T return new def _lowerCamelCase( a , a ): __a = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: __a = state_dict["shared.weight"] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: __a = state_dict["shared.weight"] if "lm_head.weight" not in state_dict: # For old 1.0 models. print("Using shared word embeddings as lm_head." 
) __a = state_dict["shared.weight"] return state_dict def _lowerCamelCase( a , a , a , a ): __a = checkpoints.load_tax_checkpoint(a ) __a = convert_tax_to_pytorch(a , num_layers=config.num_layers , is_encoder_only=a ) __a = make_state_dict(a , a ) model.load_state_dict(a , strict=a ) def _lowerCamelCase( a , a , a , a = False ): __a = TaConfig.from_json_file(a ) print(F"Building PyTorch model from configuration: {config}" ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: __a = TaEncoderModel(a ) else: __a = TaForConditionalGeneration(a ) # Load weights from tf checkpoint load_tax_weights_in_ta(a , a , a , a ) # Save pytorch-model print(F"Save PyTorch model to {pytorch_dump_path}" ) model.save_pretrained(a ) # Verify that we can load the checkpoint. model.from_pretrained(a ) print("Done" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__:Tuple = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""") # Required parameters parser.add_argument( """--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False ) SCREAMING_SNAKE_CASE__:Tuple = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only )
261
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available SCREAMING_SNAKE_CASE__:List[str] = { """configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__:Dict = [ """LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""", """LongT5EncoderModel""", """LongT5ForConditionalGeneration""", """LongT5Model""", """LongT5PreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__:Dict = [ """FlaxLongT5ForConditionalGeneration""", """FlaxLongT5Model""", """FlaxLongT5PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longta import ( LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST, LongTaEncoderModel, LongTaForConditionalGeneration, LongTaModel, LongTaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_longta import ( FlaxLongTaForConditionalGeneration, FlaxLongTaModel, FlaxLongTaPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__:Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
261
1
"""simple docstring""" def _lowerCamelCase( a ): if not head: return True # split the list to two parts __a , __a = head.next, head while fast and fast.next: __a = fast.next.next __a = slow.next __a = slow.next __a = None # Don't forget here! But forget still works! # reverse the second part __a = None while second: __a = second.next __a = node __a = second __a = nxt # compare two parts # second part has the same or one less node while node: if node.val != head.val: return False __a = node.next __a = head.next return True def _lowerCamelCase( a ): if not head or not head.next: return True # 1. Get the midpoint (slow) __a = __a = __a = head while fast and fast.next: __a , __a = fast.next.next, slow.next # 2. Push the second half into the stack __a = [slow.val] while slow.next: __a = slow.next stack.append(slow.val ) # 3. Comparison while stack: if stack.pop() != cur.val: return False __a = cur.next return True def _lowerCamelCase( a ): if not head or not head.next: return True __a = {} __a = 0 while head: if head.val in d: d[head.val].append(a ) else: __a = [pos] __a = head.next pos += 1 __a = pos - 1 __a = 0 for v in d.values(): if len(a ) % 2 != 0: middle += 1 else: __a = 0 for i in range(0 , len(a ) ): if v[i] + v[len(a ) - 1 - step] != checksum: return False step += 1 if middle > 1: return False return True
261
"""simple docstring""" import argparse import collections import torch from flax import traverse_util from tax import checkpoints from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def _lowerCamelCase( a , a , a , a="attention" ): __a = params[F"{prefix}/layers_{i}/{layer_name}/key/kernel"] __a = params[F"{prefix}/layers_{i}/{layer_name}/out/kernel"] __a = params[F"{prefix}/layers_{i}/{layer_name}/query/kernel"] __a = params[F"{prefix}/layers_{i}/{layer_name}/value/kernel"] return k, o, q, v def _lowerCamelCase( a , a , a , a=False ): if split_mlp_wi: __a = params[F"{prefix}/layers_{i}/mlp/wi_0/kernel"] __a = params[F"{prefix}/layers_{i}/mlp/wi_1/kernel"] __a = (wi_a, wi_a) else: __a = params[F"{prefix}/layers_{i}/mlp/wi/kernel"] __a = params[F"{prefix}/layers_{i}/mlp/wo/kernel"] return wi, wo def _lowerCamelCase( a , a , a , a ): return params[F"{prefix}/layers_{i}/{layer_name}/scale"] def _lowerCamelCase( a , *, a , a ): __a = traverse_util.flatten_dict(variables["target"] ) __a = {"/".join(a ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi __a = "encoder/layers_0/mlp/wi_0/kernel" in old print("Split MLP:" , a ) __a = collections.OrderedDict() # Shared embeddings. __a = old["token_embedder/embedding"] # Encoder. for i in range(a ): # Block i, layer 0 (Self Attention). __a = tax_layer_norm_lookup(a , a , "encoder" , "pre_attention_layer_norm" ) __a , __a , __a , __a = tax_attention_lookup(a , a , "encoder" , "attention" ) __a = layer_norm __a = k.T __a = o.T __a = q.T __a = v.T # Block i, layer 1 (MLP). __a = tax_layer_norm_lookup(a , a , "encoder" , "pre_mlp_layer_norm" ) __a , __a = tax_mlp_lookup(a , a , "encoder" , a ) __a = layer_norm if split_mlp_wi: __a = wi[0].T __a = wi[1].T else: __a = wi.T __a = wo.T __a = old[ "encoder/relpos_bias/rel_embedding" ].T __a = old["encoder/encoder_norm/scale"] if not is_encoder_only: # Decoder. for i in range(a ): # Block i, layer 0 (Self Attention). __a = tax_layer_norm_lookup(a , a , "decoder" , "pre_self_attention_layer_norm" ) __a , __a , __a , __a = tax_attention_lookup(a , a , "decoder" , "self_attention" ) __a = layer_norm __a = k.T __a = o.T __a = q.T __a = v.T # Block i, layer 1 (Cross Attention). __a = tax_layer_norm_lookup(a , a , "decoder" , "pre_cross_attention_layer_norm" ) __a , __a , __a , __a = tax_attention_lookup(a , a , "decoder" , "encoder_decoder_attention" ) __a = layer_norm __a = k.T __a = o.T __a = q.T __a = v.T # Block i, layer 2 (MLP). __a = tax_layer_norm_lookup(a , a , "decoder" , "pre_mlp_layer_norm" ) __a , __a = tax_mlp_lookup(a , a , "decoder" , a ) __a = layer_norm if split_mlp_wi: __a = wi[0].T __a = wi[1].T else: __a = wi.T __a = wo.T __a = old["decoder/decoder_norm/scale"] __a = old[ "decoder/relpos_bias/rel_embedding" ].T # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: __a = old["decoder/logits_dense/kernel"].T return new def _lowerCamelCase( a , a ): __a = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: __a = state_dict["shared.weight"] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: __a = state_dict["shared.weight"] if "lm_head.weight" not in state_dict: # For old 1.0 models. print("Using shared word embeddings as lm_head." 
) __a = state_dict["shared.weight"] return state_dict def _lowerCamelCase( a , a , a , a ): __a = checkpoints.load_tax_checkpoint(a ) __a = convert_tax_to_pytorch(a , num_layers=config.num_layers , is_encoder_only=a ) __a = make_state_dict(a , a ) model.load_state_dict(a , strict=a ) def _lowerCamelCase( a , a , a , a = False ): __a = TaConfig.from_json_file(a ) print(F"Building PyTorch model from configuration: {config}" ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: __a = TaEncoderModel(a ) else: __a = TaForConditionalGeneration(a ) # Load weights from tf checkpoint load_tax_weights_in_ta(a , a , a , a ) # Save pytorch-model print(F"Save PyTorch model to {pytorch_dump_path}" ) model.save_pretrained(a ) # Verify that we can load the checkpoint. model.from_pretrained(a ) print("Done" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__:Tuple = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""") # Required parameters parser.add_argument( """--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False ) SCREAMING_SNAKE_CASE__:Tuple = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only )
261
1
"""simple docstring""" import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device SCREAMING_SNAKE_CASE__:Optional[Any] = False class snake_case__ ( unittest.TestCase ): pass @nightly @require_torch_gpu class snake_case__ ( unittest.TestCase ): def a__ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self ): __a = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) __a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" ) __a = torch.manual_seed(0 ) __a = pipe.dual_guided( prompt="first prompt" , image=lowerCamelCase , text_to_image_strength=0.75 , generator=lowerCamelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowerCamelCase ) __a = VersatileDiffusionPipeline.from_pretrained(lowerCamelCase , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) __a = generator.manual_seed(0 ) __a = pipe.dual_guided( prompt="first prompt" , image=lowerCamelCase , text_to_image_strength=0.75 , generator=lowerCamelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def a__ ( self ): __a = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) __a = "cyberpunk 2077" __a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" ) __a = torch.manual_seed(0 ) __a = pipe.dual_guided( prompt=lowerCamelCase , image=lowerCamelCase , text_to_image_strength=0.75 , generator=lowerCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images __a = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) __a = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 __a = "A painting of a squirrel eating a burger " __a = torch.manual_seed(0 ) __a = pipe.text_to_image( prompt=lowerCamelCase , generator=lowerCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images __a = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) __a = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 __a = pipe.image_variation(lowerCamelCase , generator=lowerCamelCase , output_type="numpy" ).images __a = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) __a = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
261
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ): _snake_case : str = StableUnCLIPImgaImgPipeline _snake_case : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS _snake_case : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS _snake_case : Optional[Any] = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess _snake_case : List[Any] = frozenset([] ) def a__ ( self ): __a = 32 __a = embedder_hidden_size # image encoding components __a = CLIPImageProcessor(crop_size=32 , size=32 ) torch.manual_seed(0 ) __a = CLIPVisionModelWithProjection( CLIPVisionConfig( hidden_size=lowerCamelCase , projection_dim=lowerCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) ) # regular denoising components torch.manual_seed(0 ) __a = StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase ) __a = DDPMScheduler(beta_schedule="squaredcos_cap_v2" ) torch.manual_seed(0 ) __a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) __a = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) __a = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCamelCase , layers_per_block=1 , upcast_attention=lowerCamelCase , use_linear_projection=lowerCamelCase , ) torch.manual_seed(0 ) __a = DDIMScheduler( beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=lowerCamelCase , steps_offset=1 , ) torch.manual_seed(0 ) __a = AutoencoderKL() __a = { # image encoding components "feature_extractor": feature_extractor, "image_encoder": image_encoder.eval(), # image noising components "image_normalizer": image_normalizer.eval(), "image_noising_scheduler": image_noising_scheduler, # regular denoising components "tokenizer": tokenizer, "text_encoder": text_encoder.eval(), "unet": unet.eval(), "scheduler": scheduler, "vae": vae.eval(), } return 
components def a__ ( self , lowerCamelCase , lowerCamelCase=0 , lowerCamelCase=True ): if str(lowerCamelCase ).startswith("mps" ): __a = torch.manual_seed(lowerCamelCase ) else: __a = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase ) __a = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase ) if pil_image: __a = input_image * 0.5 + 0.5 __a = input_image.clamp(0 , 1 ) __a = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() __a = DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0] return { "prompt": "An anime racoon running a marathon", "image": input_image, "generator": generator, "num_inference_steps": 2, "output_type": "np", } @skip_mps def a__ ( self ): __a = "cpu" # ensure determinism for the device-dependent torch.Generator __a = self.get_dummy_components() __a = StableUnCLIPImgaImgPipeline(**lowerCamelCase ) __a = sd_pipe.to(lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase ) __a = self.get_dummy_inputs(lowerCamelCase ) inputs.update({"image_embeds": None} ) __a = sd_pipe(**lowerCamelCase ).images __a = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __a = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def a__ ( self ): __a = torch_device in ["cpu", "mps"] self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase ) def a__ ( self ): __a = torch_device in ["cpu", "mps"] self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def a__ ( self ): self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase ) @slow @require_torch_gpu class snake_case__ ( unittest.TestCase ): def a__ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self ): __a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) __a = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" ) __a = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __a = torch.Generator(device="cpu" ).manual_seed(0 ) __a = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" ) __a = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase ) def a__ ( self ): __a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) __a = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" ) __a = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) # stable unclip will oom when integration tests are run on a 
V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __a = torch.Generator(device="cpu" ).manual_seed(0 ) __a = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" ) __a = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase ) def a__ ( self ): __a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __a = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa ) __a = pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __a = pipe( lowerCamelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , ) __a = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
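# --- Illustrative sketch of the reproducibility pattern used in the tests
# above: a CPU torch.Generator seeded identically yields identical noise.
if __name__ == "__main__":
    g1 = torch.Generator(device="cpu").manual_seed(0)
    g2 = torch.Generator(device="cpu").manual_seed(0)
    assert torch.equal(torch.randn(2, 3, generator=g1), torch.randn(2, 3, generator=g2))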
261
1
"""simple docstring""" from collections import defaultdict from pathlib import Path import pandas as pd from rouge_cli import calculate_rouge_path from utils import calculate_rouge SCREAMING_SNAKE_CASE__:int = [ """Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the""" """ final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe""" """ depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.""", """The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal""" """ accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's""" """ founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the""" """ body.""", """Amnesty International releases its annual report on the death penalty. The report catalogs the use of""" """ state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the""" """ world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital""" """ punishment.""", ] SCREAMING_SNAKE_CASE__:List[Any] = [ """Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""" """ Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz""" """ had informed his Lufthansa training school of an episode of severe depression, airline says .""", """Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .""" """ Israel and the United States opposed the move, which could open the door to war crimes investigations against""" """ Israelis .""", """Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to""" """ death . Organization claims that governments around the world are using the threat of terrorism to advance""" """ executions . 
The number of executions worldwide has gone down by almost 22% compared with 2013, but death""" """ sentences up by 28% .""", ] def _lowerCamelCase( ): __a = calculate_rouge(a , a , bootstrap_aggregation=a , rouge_keys=["rouge2", "rougeL"] ) assert isinstance(a , a ) __a = calculate_rouge(a , a , bootstrap_aggregation=a , rouge_keys=["rouge2"] ) assert ( pd.DataFrame(no_aggregation["rouge2"] ).fmeasure.mean() == pd.DataFrame(no_aggregation_just_ra["rouge2"] ).fmeasure.mean() ) def _lowerCamelCase( ): __a = "rougeLsum" __a = calculate_rouge(a , a , newline_sep=a , rouge_keys=[k] )[k] __a = calculate_rouge(a , a , newline_sep=a , rouge_keys=[k] )[k] assert score > score_no_sep def _lowerCamelCase( ): __a = ["rouge1", "rouge2", "rougeL"] __a = calculate_rouge(a , a , newline_sep=a , rouge_keys=a ) __a = calculate_rouge(a , a , newline_sep=a , rouge_keys=a ) assert score_sep == score_no_sep def _lowerCamelCase( ): __a = [ "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.", "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .", ] __a = [ "Margot Frank, died in 1945, a month earlier than previously thought.", "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of" " the final seconds on board Flight 9525.", ] assert calculate_rouge(a , a , newline_sep=a ) == calculate_rouge(a , a , newline_sep=a ) def _lowerCamelCase( ): __a = [ "\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" " ] __a = [ " Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ." ] __a = calculate_rouge(a , a , rouge_keys=["rougeLsum"] , newline_sep=a )["rougeLsum"] __a = calculate_rouge(a , a , rouge_keys=["rougeLsum"] )["rougeLsum"] assert new_score > prev_score def _lowerCamelCase( ): __a = Path("examples/seq2seq/test_data/wmt_en_ro" ) __a = calculate_rouge_path(data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) ) assert isinstance(a , a ) __a = calculate_rouge_path( data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) , bootstrap_aggregation=a ) assert isinstance(a , a )
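# --- Illustrative sketch of what a ROUGE-1 F-measure boils down to (unigram
# overlap); the real `calculate_rouge` wraps rouge_score with stemming and
# bootstrap aggregation, which this deliberately omits (hypothetical helper):
from collections import Counter


def rouge1_f(hypothesis: str, reference: str) -> float:
    hyp, ref = Counter(hypothesis.split()), Counter(reference.split())
    overlap = sum((hyp & ref).values())
    if overlap == 0:
        return 0.0
    precision = overlap / sum(hyp.values())
    recall = overlap / sum(ref.values())
    return 2 * precision * recall / (precision + recall)


# rouge1_f("the cat sat", "the cat sat down") -> 2 * (1.0 * 0.75) / 1.75 ≈ 0.857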
261
"""simple docstring""" import random def _lowerCamelCase( a , a , a ): __a = a[left_index] __a = left_index + 1 for j in range(left_index + 1 , a ): if a[j] < pivot: __a , __a = a[i], a[j] i += 1 __a , __a = a[i - 1], a[left_index] return i - 1 def _lowerCamelCase( a , a , a ): if left < right: __a = random.randint(a , right - 1 ) __a , __a = ( a[left], a[pivot], ) # switches the pivot with the left most bound __a = partition(a , a , a ) quick_sort_random( a , a , a ) # recursive quicksort to the left of the pivot point quick_sort_random( a , pivot_index + 1 , a ) # recursive quicksort to the right of the pivot point def _lowerCamelCase( ): __a = input("Enter numbers separated by a comma:\n" ).strip() __a = [int(a ) for item in user_input.split("," )] quick_sort_random(a , 0 , len(a ) ) print(a ) if __name__ == "__main__": main()
261
1
"""simple docstring""" import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml SCREAMING_SNAKE_CASE__:Optional[Any] = logging.get_logger(__name__) def _lowerCamelCase( a , a ): def run_func(a ): @wraps(a ) def run_in_eager_mode(*a , **a ): return func(*a , **a ) @wraps(a ) @tf.function(experimental_compile=a ) def run_in_graph_mode(*a , **a ): return func(*a , **a ) if do_eager_mode is True: if use_xla is not False: raise ValueError( "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." ) return run_in_eager_mode else: return run_in_graph_mode return run_func def _lowerCamelCase( a , a , a ): __a = random.Random() __a = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(a , shape=(batch_size, sequence_length) , dtype=tf.intaa ) class snake_case__ ( snake_case_ ): _snake_case : TensorFlowBenchmarkArguments _snake_case : PretrainedConfig _snake_case : str = "TensorFlow" @property def a__ ( self ): return tf.__version__ def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): # initialize GPU on separate process __a = self.args.strategy if strategy is None: raise ValueError("A device strategy has to be initialized before using TensorFlow." ) __a = self._prepare_inference_func(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return self._measure_speed(_inference ) def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): __a = self.args.strategy if strategy is None: raise ValueError("A device strategy has to be initialized before using TensorFlow." ) __a = self._prepare_train_func(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return self._measure_speed(_train ) def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): # initialize GPU on separate process if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCamelCase ) __a = self.args.strategy if strategy is None: raise ValueError("A device strategy has to be initialized before using TensorFlow." ) __a = self._prepare_inference_func(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return self._measure_memory(_inference ) def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCamelCase ) __a = self.args.strategy if strategy is None: raise ValueError("A device strategy has to be initialized before using TensorFlow." ) __a = self._prepare_train_func(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return self._measure_memory(_train ) def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): __a = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError("Mixed precision is currently not supported." 
) __a = ( hasattr(lowerCamelCase , "architectures" ) and isinstance(config.architectures , lowerCamelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: __a = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model __a = __import__("transformers" , fromlist=[model_class] ) __a = getattr(lowerCamelCase , lowerCamelCase ) __a = model_cls(lowerCamelCase ) except ImportError: raise ImportError( F"{model_class} does not exist. If you just want to test the pretrained model, you might want to" " set `--only_pretrain_model` or `args.only_pretrain_model=True`." ) else: __a = TF_MODEL_MAPPING[config.__class__](lowerCamelCase ) # encoder-decoder has vocab size saved differently __a = config.vocab_size if hasattr(lowerCamelCase , "vocab_size" ) else config.encoder.vocab_size __a = random_input_ids(lowerCamelCase , lowerCamelCase , lowerCamelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_forward(): return model(lowerCamelCase , decoder_input_ids=lowerCamelCase , training=lowerCamelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_forward(): return model(lowerCamelCase , training=lowerCamelCase ) __a = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): __a = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." ) if self.args.fpaa: raise NotImplementedError("Mixed precision is currently not supported." ) __a = ( hasattr(lowerCamelCase , "architectures" ) and isinstance(config.architectures , lowerCamelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: __a = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model __a = __import__("transformers" , fromlist=[model_class] ) __a = getattr(lowerCamelCase , lowerCamelCase ) __a = model_cls(lowerCamelCase ) except ImportError: raise ImportError( F"{model_class} does not exist. If you just want to test the pretrained model, you might want to" " set `--only_pretrain_model` or `args.only_pretrain_model=True`." ) else: __a = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](lowerCamelCase ) # encoder-decoder has vocab size saved differently __a = config.vocab_size if hasattr(lowerCamelCase , "vocab_size" ) else config.encoder.vocab_size __a = random_input_ids(lowerCamelCase , lowerCamelCase , lowerCamelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_train(): __a = model(lowerCamelCase , decoder_input_ids=lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )[0] __a = tf.gradients(lowerCamelCase , model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_train(): __a = model(lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )[0] __a = tf.gradients(lowerCamelCase , model.trainable_variables ) return gradients __a = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def a__ ( self , lowerCamelCase ): with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("Do inference on TPU. 
Running model 5 times to stabilize compilation" ) timeit.repeat(lowerCamelCase , repeat=1 , number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average __a = timeit.repeat( lowerCamelCase , repeat=self.args.repeat , number=10 , ) return min(lowerCamelCase ) / 10.0 except ResourceExhaustedError as e: self.print_fn(F"Doesn't fit on GPU. {e}" ) def a__ ( self , lowerCamelCase ): logger.info( "Note that TensorFlow allocates more memory than " "it might need to speed up computation. " "The memory reported here corresponds to the memory " "reported by `nvidia-smi`, which can vary depending " "on total available memory on the GPU that is used." ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory" " consumption line by line." ) __a = start_memory_tracing("transformers" ) if self.args.is_tpu: # tpu raise NotImplementedError( "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking" " with `args.memory=False`" ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( "py3nvml not installed, we won't log GPU memory usage. " "Install py3nvml (pip install py3nvml) to log information about GPU." ) __a = "N/A" else: logger.info( "Measuring total GPU usage on GPU device. Make sure to not have additional processes" " running on the same GPU." ) # init nvml nvml.nvmlInit() func() __a = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) __a = nvml.nvmlDeviceGetMemoryInfo(lowerCamelCase ) __a = meminfo.used __a = Memory(lowerCamelCase ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( "When enabling line by line tracing, the max peak memory for CPU is inaccurate in" " TensorFlow." ) __a = None else: __a = measure_peak_memory_cpu(lowerCamelCase ) __a = Memory(lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else memory_bytes if self.args.trace_memory_line_by_line: __a = stop_memory_tracing(lowerCamelCase ) if memory is None: __a = summary.total else: __a = None return memory, summary except ResourceExhaustedError as e: self.print_fn(F"Doesn't fit on GPU. {e}" ) return "N/A", None
261
"""simple docstring""" from operator import delitem, getitem, setitem import pytest from data_structures.hashing.hash_map import HashMap def _lowerCamelCase( a ): return getitem, k def _lowerCamelCase( a , a ): return setitem, k, v def _lowerCamelCase( a ): return delitem, k def _lowerCamelCase( a , a , *a ): try: return fun(a , *a ), None except Exception as e: return None, e SCREAMING_SNAKE_CASE__:List[Any] = ( _set("""key_a""", """val_a"""), _set("""key_b""", """val_b"""), ) SCREAMING_SNAKE_CASE__:List[Any] = [ _set("""key_a""", """val_a"""), _set("""key_a""", """val_b"""), ] SCREAMING_SNAKE_CASE__:List[Any] = [ _set("""key_a""", """val_a"""), _set("""key_b""", """val_b"""), _del("""key_a"""), _del("""key_b"""), _set("""key_a""", """val_a"""), _del("""key_a"""), ] SCREAMING_SNAKE_CASE__:Any = [ _get("""key_a"""), _del("""key_a"""), _set("""key_a""", """val_a"""), _del("""key_a"""), _del("""key_a"""), _get("""key_a"""), ] SCREAMING_SNAKE_CASE__:int = [ *[_set(x, x) for x in range(5)], # guaranteed upsize ] SCREAMING_SNAKE_CASE__:Any = [ *[_set(x, x) for x in range(5)], # guaranteed upsize *[_del(x) for x in range(5)], _set("""key_a""", """val_b"""), ] @pytest.mark.parametrize( "operations" , ( pytest.param(_add_items , id="add items" ), pytest.param(_overwrite_items , id="overwrite items" ), pytest.param(_delete_items , id="delete items" ), pytest.param(_access_absent_items , id="access absent items" ), pytest.param(_add_with_resize_up , id="add with resize up" ), pytest.param(_add_with_resize_down , id="add with resize down" ), ) , ) def _lowerCamelCase( a ): __a = HashMap(initial_block_size=4 ) __a = {} for _, (fun, *args) in enumerate(a ): __a , __a = _run_operation(a , a , *a ) __a , __a = _run_operation(a , a , *a ) assert my_res == py_res assert str(a ) == str(a ) assert set(a ) == set(a ) assert len(a ) == len(a ) assert set(my.items() ) == set(py.items() ) def _lowerCamelCase( ): def is_public(a ) -> bool: return not name.startswith("_" ) __a = {name for name in dir({} ) if is_public(a )} __a = {name for name in dir(HashMap() ) if is_public(a )} assert dict_public_names > hash_public_names
261
1
"""simple docstring""" import torch from diffusers import StableDiffusionPipeline SCREAMING_SNAKE_CASE__:int = """path-to-your-trained-model""" SCREAMING_SNAKE_CASE__:Union[str, Any] = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("""cuda""") SCREAMING_SNAKE_CASE__:Any = """A photo of sks dog in a bucket""" SCREAMING_SNAKE_CASE__:Optional[int] = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0] image.save("""dog-bucket.png""")
261
"""simple docstring""" import copy import re class snake_case__ : _snake_case : Dict = """hp""" _snake_case : List[str] = {} _snake_case : int = None @classmethod def a__ ( cls , lowerCamelCase , lowerCamelCase ): __a = prefix __a = defaults cls.build_naming_info() @staticmethod def a__ ( lowerCamelCase , lowerCamelCase ): if len(lowerCamelCase ) == 0: return "" __a = None if any(char.isdigit() for char in word ): raise Exception(F"Parameters should not contain numbers: '{word}' contains a number" ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 , len(lowerCamelCase ) + 1 ): __a = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: __a = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(lowerCamelCase ): __a = "" while integer != 0: __a = chr(ord("A" ) + integer % 10 ) + s integer //= 10 return s __a = 0 while True: __a = word + "#" + int_to_alphabetic(lowerCamelCase ) if sword in info["reverse_short_word"]: continue else: __a = sword break __a = short_word __a = word return short_word @staticmethod def a__ ( lowerCamelCase , lowerCamelCase ): __a = param_name.split("_" ) __a = [TrialShortNamer.shortname_for_word(lowerCamelCase , lowerCamelCase ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name __a = ["", "_"] for separator in separators: __a = separator.join(lowerCamelCase ) if shortname not in info["reverse_short_param"]: __a = shortname __a = param_name return shortname return param_name @staticmethod def a__ ( lowerCamelCase , lowerCamelCase ): __a = TrialShortNamer.shortname_for_key(lowerCamelCase , lowerCamelCase ) __a = short_name __a = param_name @classmethod def a__ ( cls ): if cls.NAMING_INFO is not None: return __a = { "short_word": {}, "reverse_short_word": {}, "short_param": {}, "reverse_short_param": {}, } __a = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(lowerCamelCase , lowerCamelCase ) __a = info @classmethod def a__ ( cls , lowerCamelCase ): cls.build_naming_info() assert cls.PREFIX is not None __a = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(F"You should provide a default value for the param name {k} with value {v}" ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue __a = cls.NAMING_INFO["short_param"][k] if isinstance(lowerCamelCase , lowerCamelCase ): __a = 1 if v else 0 __a = "" if isinstance(lowerCamelCase , (int, float) ) else "-" __a = F"{key}{sep}{v}" name.append(lowerCamelCase ) return "_".join(lowerCamelCase ) @classmethod def a__ ( cls , lowerCamelCase ): __a = repr[len(cls.PREFIX ) + 1 :] if repr == "": __a = [] else: __a = repr.split("_" ) __a = {} for value in values: if "-" in value: __a , __a = value.split("-" ) else: __a = re.sub("[0-9.]" , "" , lowerCamelCase ) __a = float(re.sub("[^0-9.]" , "" , lowerCamelCase ) ) __a = cls.NAMING_INFO["reverse_short_param"][p_k] __a = p_v for k in cls.DEFAULTS: if k not in parameters: __a = cls.DEFAULTS[k] return parameters
261
1
"""simple docstring""" def _lowerCamelCase( a ): if edge <= 0 or not isinstance(a , a ): raise ValueError("Length must be a positive." ) return 3 * ((2_5 + 1_0 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2) def _lowerCamelCase( a ): if edge <= 0 or not isinstance(a , a ): raise ValueError("Length must be a positive." ) return ((1_5 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3) if __name__ == "__main__": import doctest doctest.testmod()
261
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING SCREAMING_SNAKE_CASE__:int = logging.get_logger(__name__) class snake_case__ ( snake_case_ ): _snake_case : Optional[int] = """upernet""" def __init__( self , lowerCamelCase=None , lowerCamelCase=512 , lowerCamelCase=0.02 , lowerCamelCase=[1, 2, 3, 6] , lowerCamelCase=True , lowerCamelCase=0.4 , lowerCamelCase=384 , lowerCamelCase=256 , lowerCamelCase=1 , lowerCamelCase=False , lowerCamelCase=255 , **lowerCamelCase , ): super().__init__(**lowerCamelCase ) if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." ) __a = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] ) elif isinstance(lowerCamelCase , lowerCamelCase ): __a = backbone_config.get("model_type" ) __a = CONFIG_MAPPING[backbone_model_type] __a = config_class.from_dict(lowerCamelCase ) __a = backbone_config __a = hidden_size __a = initializer_range __a = pool_scales __a = use_auxiliary_head __a = auxiliary_loss_weight __a = auxiliary_in_channels __a = auxiliary_channels __a = auxiliary_num_convs __a = auxiliary_concat_input __a = loss_ignore_index def a__ ( self ): __a = copy.deepcopy(self.__dict__ ) __a = self.backbone_config.to_dict() __a = self.__class__.model_type return output
261
1
"""simple docstring""" import itertools import random import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin SCREAMING_SNAKE_CASE__:Any = random.Random() if is_torch_available(): import torch def _lowerCamelCase( a , a=1.0 , a=None , a=None ): if rng is None: __a = global_rng __a = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class snake_case__ ( unittest.TestCase ): def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=400 , lowerCamelCase=2000 , lowerCamelCase=1 , lowerCamelCase=0.0 , lowerCamelCase=16000 , lowerCamelCase=True , lowerCamelCase=True , ): __a = parent __a = batch_size __a = min_seq_length __a = max_seq_length __a = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) __a = feature_size __a = padding_value __a = sampling_rate __a = return_attention_mask __a = do_normalize def a__ ( self ): return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def a__ ( self , lowerCamelCase=False , lowerCamelCase=False ): def _flatten(lowerCamelCase ): return list(itertools.chain(*lowerCamelCase ) ) if equal_length: __a = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size __a = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: __a = [np.asarray(lowerCamelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class snake_case__ ( snake_case_, unittest.TestCase ): _snake_case : str = ASTFeatureExtractor def a__ ( self ): __a = ASTFeatureExtractionTester(self ) def a__ ( self ): # Tests that all call wrap to encode_plus and batch_encode_plus __a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 __a = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __a = [np.asarray(lowerCamelCase ) for speech_input in speech_inputs] # Test not batched input __a = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values __a = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) ) # Test batched __a = feat_extract(lowerCamelCase , padding=lowerCamelCase , return_tensors="np" ).input_values __a = feat_extract(lowerCamelCase , padding=lowerCamelCase , return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ): self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. 
__a = [floats_list((1, x) )[0] for x in (800, 800, 800)] __a = np.asarray(lowerCamelCase ) __a = feat_extract(lowerCamelCase , return_tensors="np" ).input_values __a = feat_extract(lowerCamelCase , return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ): self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) ) @require_torch def a__ ( self ): import torch __a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __a = np.random.rand(100 ).astype(np.floataa ) __a = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: __a = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) __a = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def a__ ( self , lowerCamelCase ): from datasets import load_dataset __a = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" ) # automatic decoding with librispeech __a = ds.sort("id" ).select(range(lowerCamelCase ) )[:num_samples]["audio"] return [x["array"] for x in speech_samples] @require_torch def a__ ( self ): # fmt: off __a = torch.tensor( [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776, -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133, -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936, -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] ) # fmt: on __a = self._load_datasamples(1 ) __a = ASTFeatureExtractor() __a = feature_extractor(lowerCamelCase , return_tensors="pt" ).input_values self.assertEquals(input_values.shape , (1, 1024, 128) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCamelCase , atol=1E-4 ) )
261
"""simple docstring""" def _lowerCamelCase( a = 1_0_0_0 ): __a = 3 __a = 0 while a < n: if a % 3 == 0 or a % 5 == 0: result += a elif a % 1_5 == 0: result -= a a += 1 return result if __name__ == "__main__": print(F'''{solution() = }''')
261
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE__:Optional[int] = { """configuration_clipseg""": [ """CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CLIPSegConfig""", """CLIPSegTextConfig""", """CLIPSegVisionConfig""", ], """processing_clipseg""": ["""CLIPSegProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__:List[Any] = [ """CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST""", """CLIPSegModel""", """CLIPSegPreTrainedModel""", """CLIPSegTextModel""", """CLIPSegVisionModel""", """CLIPSegForImageSegmentation""", ] if TYPE_CHECKING: from .configuration_clipseg import ( CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import ( CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel, ) else: import sys SCREAMING_SNAKE_CASE__:List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
261
"""simple docstring""" import operator def _lowerCamelCase( a , a = False , a = None ): __a = operator.lt if reverse else operator.gt __a = solution or [] if not arr: return solution __a = [arr.pop(0 )] for i, item in enumerate(a ): if _operator(a , sublist[-1] ): sublist.append(a ) arr.pop(a ) # merging sublist into solution list if not solution: solution.extend(a ) else: while sublist: __a = sublist.pop(0 ) for i, xx in enumerate(a ): if not _operator(a , a ): solution.insert(a , a ) break else: solution.append(a ) strand_sort(a , a , a ) return solution if __name__ == "__main__": assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5] assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
261
1
"""simple docstring""" def _lowerCamelCase( a , a , a , a ): # Return True if there is node that has not iterated. __a = [False] * len(a ) __a = [] queue.append(a ) __a = True while queue: __a = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(a ) __a = True __a = u return visited[t] def _lowerCamelCase( a , a , a ): # This array is filled by BFS and to store path __a = [-1] * (len(a )) __a = 0 while bfs(a , a , a , a ): __a = float("Inf" ) __a = sink while s != source: # Find the minimum value in select path __a = min(a , graph[parent[s]][s] ) __a = parent[s] max_flow += path_flow __a = sink while v != source: __a = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow __a = parent[v] return max_flow SCREAMING_SNAKE_CASE__:List[Any] = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__:Union[str, Any] = 0, 5 print(ford_fulkerson(graph, source, sink))
261
"""simple docstring""" import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class snake_case__ : def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=50 , lowerCamelCase=0.02 , lowerCamelCase=True , lowerCamelCase=None , ): __a = parent __a = batch_size __a = seq_length __a = is_training __a = use_input_mask __a = vocab_size __a = hidden_size __a = num_hidden_layers __a = num_attention_heads __a = intermediate_size __a = hidden_act __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = max_position_embeddings __a = initializer_range __a = use_labels __a = scope def a__ ( self ): __a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a = None if self.use_input_mask: __a = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: __a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a = self.get_config() return config, input_ids, input_mask, token_labels def a__ ( self ): return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , ) def a__ ( self ): ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) = self.prepare_config_and_inputs() __a = True __a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ): __a = BertGenerationEncoder(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() __a = model(lowerCamelCase , attention_mask=lowerCamelCase ) __a = model(lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ): __a = True __a = BertGenerationEncoder(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() __a = model( lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , ) __a = model( lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a__ ( self , 
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ): __a = True __a = True __a = BertGenerationDecoder(config=lowerCamelCase ).to(lowerCamelCase ).eval() # first forward pass __a = model( lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , use_cache=lowerCamelCase , ) __a = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __a = ids_tensor((self.batch_size, 3) , config.vocab_size ) __a = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __a = torch.cat([input_ids, next_tokens] , dim=-1 ) __a = torch.cat([input_mask, next_mask] , dim=-1 ) __a = model( lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , output_hidden_states=lowerCamelCase , )["hidden_states"][0] __a = model( lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , output_hidden_states=lowerCamelCase , )["hidden_states"][0] # select random slice __a = ids_tensor((1,) , output_from_past.shape[-1] ).item() __a = output_from_no_past[:, -3:, random_slice_idx].detach() __a = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) ) def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , *lowerCamelCase , ): __a = BertGenerationDecoder(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() __a = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a__ ( self ): __a , __a , __a , __a = self.prepare_config_and_inputs() __a = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ): _snake_case : Union[str, Any] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () _snake_case : Any = (BertGenerationDecoder,) if is_torch_available() else () _snake_case : Union[str, Any] = ( {"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder} if is_torch_available() else {} ) def a__ ( self ): __a = BertGenerationEncoderTester(self ) __a = ConfigTester(self , config_class=lowerCamelCase , hidden_size=37 ) def a__ ( self ): self.config_tester.run_common_tests() def a__ ( self ): __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase ) def a__ ( self ): __a , __a , __a , __a = self.model_tester.prepare_config_and_inputs() __a = "bert" self.model_tester.create_and_check_model(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) def a__ ( self ): __a = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase ) def a__ ( self ): __a = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCamelCase ) def a__ ( self ): # This regression test was failing with PyTorch < 1.3 ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( 
__a ) , ( __a ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() __a = None self.model_tester.create_and_check_model_as_decoder( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) def a__ ( self ): __a = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase ) @slow def a__ ( self ): __a = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) self.assertIsNotNone(lowerCamelCase ) @require_torch class snake_case__ ( unittest.TestCase ): @slow def a__ ( self ): __a = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) __a = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] ) with torch.no_grad(): __a = model(lowerCamelCase )[0] __a = torch.Size([1, 8, 1024] ) self.assertEqual(output.shape , lowerCamelCase ) __a = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) ) @require_torch class snake_case__ ( unittest.TestCase ): @slow def a__ ( self ): __a = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) __a = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] ) with torch.no_grad(): __a = model(lowerCamelCase )[0] __a = torch.Size([1, 8, 50358] ) self.assertEqual(output.shape , lowerCamelCase ) __a = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) )
261
1
"""simple docstring""" import argparse import torch # Step 1. clone https://github.com/microsoft/unilm # Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd # Step 3. cd unilm # Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink # import classes from unilm.wavlm.WavLM import WavLM as WavLMOrig from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig from transformers import WavLMConfig, WavLMModel, logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__:List[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__:List[Any] = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""", """self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""", """self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """ctc_proj""", """mask_emb""": """masked_spec_embed""", } SCREAMING_SNAKE_CASE__:List[str] = [ """ctc_proj""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def _lowerCamelCase( a , a , a , a , a ): for attribute in key.split("." ): __a = getattr(a , a ) if weight_type is not None: __a = getattr(a , a ).shape else: __a = hf_pointer.shape assert hf_shape == value.shape, ( F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" F" {value.shape} for {full_name}" ) if weight_type == "weight": __a = value elif weight_type == "weight_g": __a = value elif weight_type == "weight_v": __a = value elif weight_type == "bias": __a = value else: __a = value logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def _lowerCamelCase( a , a ): __a = [] __a = fairseq_model.state_dict() __a = hf_model.feature_extractor for name, value in fairseq_dict.items(): __a = False if "conv_layers" in name: load_conv_layer( a , a , a , a , hf_model.config.feat_extract_norm == "group" , ) __a = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: __a = True if "*" in mapped_key: __a = name.split(a )[0].split("." 
)[-2] __a = mapped_key.replace("*" , a ) if "weight_g" in name: __a = "weight_g" elif "weight_v" in name: __a = "weight_v" elif "bias" in name and "relative_attention_bias" not in name: __a = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj __a = "weight" else: __a = None set_recursively(a , a , a , a , a ) continue if not is_used: unused_weights.append(a ) logger.warning(F"Unused weights: {unused_weights}" ) def _lowerCamelCase( a , a , a , a , a ): __a = full_name.split("conv_layers." )[-1] __a = name.split("." ) __a = int(items[0] ) __a = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) __a = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) __a = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) __a = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." ) __a = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) else: unused_weights.append(a ) @torch.no_grad() def _lowerCamelCase( a , a , a=None ): # load the pre-trained checkpoints __a = torch.load(a ) __a = WavLMConfigOrig(checkpoint["cfg"] ) __a = WavLMOrig(a ) model.load_state_dict(checkpoint["model"] ) model.eval() if config_path is not None: __a = WavLMConfig.from_pretrained(a ) else: __a = WavLMConfig() __a = WavLMModel(a ) recursively_load_weights(a , a ) hf_wavlm.save_pretrained(a ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__:Optional[int] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") SCREAMING_SNAKE_CASE__:Any = parser.parse_args() convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
261
"""simple docstring""" # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401 from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401 deprecate( """stable diffusion controlnet""", """0.22.0""", """Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""", standard_warn=False, stacklevel=3, )
261
1
"""simple docstring""" import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class snake_case__ ( snake_case_ ): def __init__( self , lowerCamelCase = "▁" , lowerCamelCase = True , lowerCamelCase = "<unk>" , lowerCamelCase = "</s>" , lowerCamelCase = "<pad>" , ): __a = { "pad": {"id": 0, "token": pad_token}, "eos": {"id": 1, "token": eos_token}, "unk": {"id": 2, "token": unk_token}, } __a = [None] * len(self.special_tokens ) for token_dict in self.special_tokens.values(): __a = token_dict["token"] __a = Tokenizer(Unigram() ) __a = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(" {2,}" ) , " " ), normalizers.Lowercase(), ] ) __a = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=lowerCamelCase , add_prefix_space=lowerCamelCase ), pre_tokenizers.Digits(individual_digits=lowerCamelCase ), pre_tokenizers.Punctuation(), ] ) __a = decoders.Metaspace(replacement=lowerCamelCase , add_prefix_space=lowerCamelCase ) __a = TemplateProcessing( single=F"$A {self.special_tokens['eos']['token']}" , special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])] , ) __a = { "model": "SentencePieceUnigram", "replacement": replacement, "add_prefix_space": add_prefix_space, } super().__init__(lowerCamelCase , lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase = 8000 , lowerCamelCase = True , ): __a = trainers.UnigramTrainer( vocab_size=lowerCamelCase , special_tokens=self.special_tokens_list , show_progress=lowerCamelCase , ) if isinstance(lowerCamelCase , lowerCamelCase ): __a = [files] self._tokenizer.train(lowerCamelCase , trainer=lowerCamelCase ) self.add_unk_id() def a__ ( self , lowerCamelCase , lowerCamelCase = 8000 , lowerCamelCase = True , ): __a = trainers.UnigramTrainer( vocab_size=lowerCamelCase , special_tokens=self.special_tokens_list , show_progress=lowerCamelCase , ) self._tokenizer.train_from_iterator(lowerCamelCase , trainer=lowerCamelCase ) self.add_unk_id() def a__ ( self ): __a = json.loads(self._tokenizer.to_str() ) __a = self.special_tokens["unk"]["id"] __a = Tokenizer.from_str(json.dumps(lowerCamelCase ) )
261
"""simple docstring""" import argparse import fairseq import torch from torch import nn from transformers import ( MBartaaTokenizer, MBartConfig, MBartForCausalLM, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() SCREAMING_SNAKE_CASE__:Any = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__:Any = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } SCREAMING_SNAKE_CASE__:Optional[int] = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def _lowerCamelCase( a , a , a , a , a ): for attribute in key.split("." ): __a = getattr(a , a ) if weight_type is not None: __a = getattr(a , a ).shape else: __a = hf_pointer.shape assert hf_shape == value.shape, ( F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" F" {value.shape} for {full_name}" ) if weight_type == "weight": __a = value elif weight_type == "weight_g": __a = value elif weight_type == "weight_v": __a = value elif weight_type == "bias": __a = value else: __a = value logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def _lowerCamelCase( a , a ): __a = [] __a = fairseq_model.state_dict() __a = hf_model.feature_extractor __a = hf_model.adapter for name, value in fairseq_dict.items(): __a = False if "conv_layers" in name: load_conv_layer( a , a , a , a , hf_model.config.feat_extract_norm == "group" , ) __a = True elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."] ): load_adapter(a , a , a , a ) __a = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: __a = True if "*" in mapped_key: __a = name.split(a )[0].split("." )[-2] __a = mapped_key.replace("*" , a ) if "weight_g" in name: __a = "weight_g" elif "weight_v" in name: __a = "weight_v" elif "bias" in name: __a = "bias" elif "weight" in name: __a = "weight" else: __a = None set_recursively(a , a , a , a , a ) continue if not is_used: unused_weights.append(a ) logger.warning(F"Unused weights: {unused_weights}" ) def _lowerCamelCase( a , a , a , a , a ): __a = full_name.split("conv_layers." )[-1] __a = name.split("." 
) __a = int(items[0] ) __a = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) __a = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) __a = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) __a = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." ) __a = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) else: unused_weights.append(a ) def _lowerCamelCase( a , a , a , a ): __a = full_name.split("adaptor." )[-1] __a = name.split("." ) if items[1].isdigit(): __a = int(items[1] ) else: __a = None if "adaptor" not in full_name: if "proj_ln" in full_name: # has to be layer norm if "bias" in name: assert ( value.shape == adapter.proj_layer_norm.bias.data.shape ), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found." __a = value logger.info(F"Adapter proj layer norm bias was initialized from {full_name}." ) if "weight" in name: assert ( value.shape == adapter.proj_layer_norm.weight.data.shape ), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found." __a = value else: # has to be projection layer if "bias" in name: assert ( value.shape == adapter.proj.bias.data.shape ), F"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found." __a = value logger.info(F"Adapter proj layer bias was initialized from {full_name}." ) if "weight" in name: assert ( value.shape == adapter.proj.weight.data.shape ), F"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found." __a = value logger.info(F"Adapter proj layer weight was initialized from {full_name}." ) elif isinstance(a , a ): if "bias" in name: assert ( value.shape == adapter.layers[layer_id].conv.bias.data.shape ), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found." __a = value logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." ) elif "weight" in name: assert ( value.shape == adapter.layers[layer_id].conv.weight.data.shape ), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found." __a = value logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." 
) else: unused_weights.append(a ) def _lowerCamelCase( a ): __a , __a = emb.weight.shape __a = nn.Linear(a , a , bias=a ) __a = emb.weight.data return lin_layer @torch.no_grad() def _lowerCamelCase( a , a , a , a , a , a , a , a , a , a , a , ): __a = WavaVecaConfig.from_pretrained( a , add_adapter=a , adapter_stride=a , adapter_kernel_size=a , use_auth_token=a , output_hidden_size=a , ) __a = MBartConfig.from_pretrained(a ) # load model __a , __a , __a = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={ "config_yaml": config_yaml_path, "data": "/".join(dict_path.split("/" )[:-1] ), "w2v_path": checkpoint_path, "load_pretrained_decoder_from": None, } , ) __a = model[0].eval() # load feature extractor __a = WavaVecaFeatureExtractor.from_pretrained(a , use_auth_token=a ) # set weights for wav2vec2 encoder __a = WavaVecaModel(a ) recursively_load_weights_wavaveca(model.encoder , a ) # load decoder weights __a = MBartForCausalLM(a ) __a , __a = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=a ) logger.warning(F"The following keys are missing when loading the decoder weights: {missing_keys}" ) logger.warning(F"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" ) __a = SpeechEncoderDecoderModel(encoder=a , decoder=a ) __a = False __a = MBartaaTokenizer(a ) tokenizer.save_pretrained(a ) __a = hf_wavavec.config.to_dict() __a = tokenizer.pad_token_id __a = tokenizer.bos_token_id __a = tokenizer.eos_token_id __a = "mbart50" __a = "wav2vec2" __a = tokenizer.eos_token_id __a = 2_5_0_0_0_4 __a = tokenizer.eos_token_id __a = SpeechEncoderDecoderConfig.from_dict(a ) hf_wavavec.save_pretrained(a ) feature_extractor.save_pretrained(a ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__:int = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""") parser.add_argument( """--encoder_config_path""", default="""facebook/wav2vec2-xls-r-1b""", type=str, help="""Path to hf encoder wav2vec2 checkpoint config""", ) parser.add_argument( """--decoder_config_path""", default="""facebook/mbart-large-50-one-to-many-mmt""", type=str, help="""Path to hf decoder checkpoint config""", ) parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""") parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""") parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""") parser.add_argument("""--encoder_output_dim""", default=1024, type=int, help="""encoder output dim""") parser.add_argument("""--start_token_id""", default=250004, type=int, help="""`decoder_start_token_id` of model config""") SCREAMING_SNAKE_CASE__:List[Any] = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, args.config_yaml_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, add_adapter=args.add_adapter, adapter_kernel_size=args.adapter_kernel_size, 
adapter_stride=args.adapter_stride, decoder_start_token_id=args.start_token_id, encoder_output_dim=args.encoder_output_dim, )
261
1
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class snake_case__ ( snake_case_, snake_case_, unittest.TestCase ): _snake_case : Union[str, Any] = StableDiffusionSAGPipeline _snake_case : Any = TEXT_TO_IMAGE_PARAMS _snake_case : Dict = TEXT_TO_IMAGE_BATCH_PARAMS _snake_case : int = TEXT_TO_IMAGE_IMAGE_PARAMS _snake_case : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS _snake_case : Dict = False def a__ ( self ): torch.manual_seed(0 ) __a = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) __a = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=lowerCamelCase , set_alpha_to_one=lowerCamelCase , ) torch.manual_seed(0 ) __a = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) __a = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) __a = CLIPTextModel(lowerCamelCase ) __a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) __a = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def a__ ( self , lowerCamelCase , lowerCamelCase=0 ): if str(lowerCamelCase ).startswith("mps" ): __a = torch.manual_seed(lowerCamelCase ) else: __a = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase ) __a = { "prompt": ".", "generator": generator, "num_inference_steps": 2, "guidance_scale": 1.0, "sag_scale": 1.0, "output_type": "numpy", } return inputs def a__ ( self ): super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class snake_case__ ( unittest.TestCase ): def a__ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self ): __a = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" ) __a = sag_pipe.to(lowerCamelCase ) sag_pipe.set_progress_bar_config(disable=lowerCamelCase ) __a = "." 
__a = torch.manual_seed(0 ) __a = sag_pipe( [prompt] , generator=lowerCamelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" ) __a = output.images __a = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __a = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2 def a__ ( self ): __a = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" ) __a = sag_pipe.to(lowerCamelCase ) sag_pipe.set_progress_bar_config(disable=lowerCamelCase ) __a = "." __a = torch.manual_seed(0 ) __a = sag_pipe( [prompt] , generator=lowerCamelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" ) __a = output.images __a = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __a = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2 def a__ ( self ): __a = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" ) __a = sag_pipe.to(lowerCamelCase ) sag_pipe.set_progress_bar_config(disable=lowerCamelCase ) __a = "." __a = torch.manual_seed(0 ) __a = sag_pipe( [prompt] , width=768 , height=512 , generator=lowerCamelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" , ) __a = output.images assert image.shape == (1, 512, 768, 3)
261
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) SCREAMING_SNAKE_CASE__:str = { """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""], """processing_trocr""": ["""TrOCRProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__:Tuple = [ """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""", """TrOCRForCausalLM""", """TrOCRPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys SCREAMING_SNAKE_CASE__:List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
261
1
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { "caidas/swin2sr-classicalsr-x2-64": ( "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json" ), } class lowercase_ ( lowercase ): '''simple docstring''' __snake_case = '''swin2sr''' __snake_case = { '''hidden_size''': '''embed_dim''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self : int , __UpperCAmelCase : int=64 , __UpperCAmelCase : int=1 , __UpperCAmelCase : int=3 , __UpperCAmelCase : Dict=180 , __UpperCAmelCase : Union[str, Any]=[6, 6, 6, 6, 6, 6] , __UpperCAmelCase : Optional[Any]=[6, 6, 6, 6, 6, 6] , __UpperCAmelCase : Union[str, Any]=8 , __UpperCAmelCase : Union[str, Any]=2.0 , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : Optional[int]=0.0 , __UpperCAmelCase : str=0.0 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : Optional[Any]="gelu" , __UpperCAmelCase : str=False , __UpperCAmelCase : Tuple=0.02 , __UpperCAmelCase : Optional[Any]=1e-5 , __UpperCAmelCase : str=2 , __UpperCAmelCase : Union[str, Any]=1.0 , __UpperCAmelCase : List[Any]="1conv" , __UpperCAmelCase : int="pixelshuffle" , **__UpperCAmelCase : str , ) ->Tuple: """simple docstring""" super().__init__(**__UpperCAmelCase ) a = image_size a = patch_size a = num_channels a = embed_dim a = depths a = len(__UpperCAmelCase ) a = num_heads a = window_size a = mlp_ratio a = qkv_bias a = hidden_dropout_prob a = attention_probs_dropout_prob a = drop_path_rate a = hidden_act a = use_absolute_embeddings a = layer_norm_eps a = initializer_range a = upscale a = img_range a = resi_connection a = upsampler
0
"""simple docstring""" import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO, ) SCREAMING_SNAKE_CASE__:Any = logging.getLogger(__name__) def _lowerCamelCase( a ): __a = git.Repo(search_parent_directories=a ) __a = { "repo_id": str(a ), "repo_sha": str(repo.head.object.hexsha ), "repo_branch": str(repo.active_branch ), } with open(os.path.join(a , "git_log.json" ) , "w" ) as f: json.dump(a , a , indent=4 ) def _lowerCamelCase( a ): if params.n_gpu <= 0: __a = 0 __a = -1 __a = True __a = False return assert torch.cuda.is_available() logger.info("Initializing GPUs" ) if params.n_gpu > 1: assert params.local_rank != -1 __a = int(os.environ["WORLD_SIZE"] ) __a = int(os.environ["N_GPU_NODE"] ) __a = int(os.environ["RANK"] ) # number of nodes / node ID __a = params.world_size // params.n_gpu_per_node __a = params.global_rank // params.n_gpu_per_node __a = True assert params.n_nodes == int(os.environ["N_NODES"] ) assert params.node_id == int(os.environ["NODE_RANK"] ) # local job (single GPU) else: assert params.local_rank == -1 __a = 1 __a = 0 __a = 0 __a = 0 __a = 1 __a = 1 __a = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode __a = params.node_id == 0 and params.local_rank == 0 __a = params.n_nodes > 1 # summary __a = F"--- Global rank: {params.global_rank} - " logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes ) logger.info(PREFIX + "Node ID : %i" % params.node_id ) logger.info(PREFIX + "Local rank : %i" % params.local_rank ) logger.info(PREFIX + "World size : %i" % params.world_size ) logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node ) logger.info(PREFIX + "Master : %s" % str(params.is_master ) ) logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node ) ) logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu ) ) logger.info(PREFIX + "Hostname : %s" % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info("Initializing PyTorch distributed" ) torch.distributed.init_process_group( init_method="env://" , backend="nccl" , ) def _lowerCamelCase( a ): np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
261
0
'''simple docstring''' def lowerCAmelCase_ ( snake_case_ : str ) -> bool: '''simple docstring''' return credit_card_number.startswith(("34", "35", "37", "4", "5", "6") ) def lowerCAmelCase_ ( snake_case_ : str ) -> bool: '''simple docstring''' UpperCAmelCase_ = credit_card_number UpperCAmelCase_ = 0 UpperCAmelCase_ = len(snake_case_ ) - 2 for i in range(snake_case_ , -1 , -2 ): # double the value of every second digit UpperCAmelCase_ = int(cc_number[i] ) digit *= 2 # If doubling of a number results in a two digit number # i.e greater than 9(e.g., 6 × 2 = 12), # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6), # to get a single digit number. if digit > 9: digit %= 10 digit += 1 UpperCAmelCase_ = cc_number[:i] + str(snake_case_ ) + cc_number[i + 1 :] total += digit # Sum up the remaining digits for i in range(len(snake_case_ ) - 1 , -1 , -2 ): total += int(cc_number[i] ) return total % 10 == 0 def lowerCAmelCase_ ( snake_case_ : str ) -> bool: '''simple docstring''' UpperCAmelCase_ = f"""{credit_card_number} is an invalid credit card number because""" if not credit_card_number.isdigit(): print(f"""{error_message} it has nonnumerical characters.""" ) return False if not 13 <= len(snake_case_ ) <= 16: print(f"""{error_message} of its length.""" ) return False if not validate_initial_digits(snake_case_ ): print(f"""{error_message} of its first two digits.""" ) return False if not luhn_validation(snake_case_ ): print(f"""{error_message} it fails the Luhn check.""" ) return False print(f"""{credit_card_number} is a valid credit card number.""" ) return True if __name__ == "__main__": import doctest doctest.testmod() validate_credit_card_number('4111111111111111') validate_credit_card_number('32323')
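For reference, a compact restatement of the same Luhn rule; folding a two-digit product d*2 > 9 to d*2 - 9 is arithmetic shorthand for the digit-sum-plus-carry step above. The card numbers are standard test values, not real ones:

def luhn_ok(number: str) -> bool:
    digits = [int(c) for c in number]
    # Double every second digit from the right; d*2 - 9 equals the
    # "digit %= 10; digit += 1" fold performed in luhn_validation above.
    doubled = sum(d * 2 - 9 if d * 2 > 9 else d * 2 for d in digits[-2::-2])
    return (sum(digits[-1::-2]) + doubled) % 10 == 0

assert luhn_ok("4111111111111111")      # standard Visa test number
assert not luhn_ok("4111111111111112")  # last digit perturbed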
1
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available SCREAMING_SNAKE_CASE__:List[str] = {"""configuration_van""": ["""VAN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VanConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__:Optional[Any] = [ """VAN_PRETRAINED_MODEL_ARCHIVE_LIST""", """VanForImageClassification""", """VanModel""", """VanPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_van import ( VAN_PRETRAINED_MODEL_ARCHIVE_LIST, VanForImageClassification, VanModel, VanPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__:Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
261
0
'''simple docstring''' def _SCREAMING_SNAKE_CASE (A = 1_000 ) -> int: """simple docstring""" return sum(e for e in range(3 , A ) if e % 3 == 0 or e % 5 == 0 ) if __name__ == "__main__": print(f"""{solution() = }""")
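The loop above can be cross-checked with a closed form: sum the multiples of 3 and of 5 separately, then subtract the multiples of 15 that were counted twice (inclusion-exclusion). A sketch, assuming the limit stays exclusive as in the generator expression:

def solution_closed_form(limit: int = 1_000) -> int:
    # Sum of positive multiples of k below `limit` is k * t * (t + 1) // 2
    # with t = (limit - 1) // k; multiples of 15 are double-counted.
    def tri(k: int) -> int:
        t = (limit - 1) // k
        return k * t * (t + 1) // 2

    return tri(3) + tri(5) - tri(15)

assert solution_closed_form(1_000) == sum(e for e in range(3, 1_000) if e % 3 == 0 or e % 5 == 0)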
2
"""simple docstring""" from __future__ import annotations from typing import Any class snake_case__ : def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = 0 ): __a , __a = row, column __a = [[default_value for c in range(lowerCamelCase )] for r in range(lowerCamelCase )] def __str__( self ): __a = F"Matrix consist of {self.row} rows and {self.column} columns\n" # Make string identifier __a = 0 for row_vector in self.array: for obj in row_vector: __a = max(lowerCamelCase , len(str(lowerCamelCase ) ) ) __a = F"%{max_element_length}s" # Make string and return def single_line(lowerCamelCase ) -> str: nonlocal string_format_identifier __a = "[" line += ", ".join(string_format_identifier % (obj,) for obj in row_vector ) line += "]" return line s += "\n".join(single_line(lowerCamelCase ) for row_vector in self.array ) return s def __repr__( self ): return str(self ) def a__ ( self , lowerCamelCase ): if not (isinstance(lowerCamelCase , (list, tuple) ) and len(lowerCamelCase ) == 2): return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False else: return True def __getitem__( self , lowerCamelCase ): assert self.validate_indicies(lowerCamelCase ) return self.array[loc[0]][loc[1]] def __setitem__( self , lowerCamelCase , lowerCamelCase ): assert self.validate_indicies(lowerCamelCase ) __a = value def __add__( self , lowerCamelCase ): assert isinstance(lowerCamelCase , lowerCamelCase ) assert self.row == another.row and self.column == another.column # Add __a = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __a = self[r, c] + another[r, c] return result def __neg__( self ): __a = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __a = -self[r, c] return result def __sub__( self , lowerCamelCase ): return self + (-another) def __mul__( self , lowerCamelCase ): if isinstance(lowerCamelCase , (int, float) ): # Scalar multiplication __a = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __a = self[r, c] * another return result elif isinstance(lowerCamelCase , lowerCamelCase ): # Matrix multiplication assert self.column == another.row __a = Matrix(self.row , another.column ) for r in range(self.row ): for c in range(another.column ): for i in range(self.column ): result[r, c] += self[r, i] * another[i, c] return result else: __a = F"Unsupported type given for another ({type(lowerCamelCase )})" raise TypeError(lowerCamelCase ) def a__ ( self ): __a = Matrix(self.column , self.row ) for r in range(self.row ): for c in range(self.column ): __a = self[r, c] return result def a__ ( self , lowerCamelCase , lowerCamelCase ): assert isinstance(lowerCamelCase , lowerCamelCase ) and isinstance(lowerCamelCase , lowerCamelCase ) assert self.row == self.column == u.row == v.row # u, v should be column vector assert u.column == v.column == 1 # u, v should be column vector # Calculate __a = v.transpose() __a = (v_t * self * u)[0, 0] + 1 if numerator_factor == 0: return None # It's not invertable return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor)) # Testing if __name__ == "__main__": def _lowerCamelCase( ): # a^(-1) __a = Matrix(3 , 3 , 0 ) for i in range(3 ): __a = 1 print(F"a^(-1) is {ainv}" ) # u, v __a = Matrix(3 , 1 , 0 ) __a , __a , __a = 1, 2, -3 __a = Matrix(3 , 1 , 0 ) __a , __a , __a = 4, -2, 5 print(F"u is {u}" ) print(F"v is {v}" ) print(F"uv^T is {u * v.transpose()}" ) # Sherman Morrison print(F"(a + uv^T)^(-1) is 
{ainv.sherman_morrison(a , a )}" ) def _lowerCamelCase( ): import doctest doctest.testmod() testa()
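A small numerical sanity check of the Sherman-Morrison identity the class applies; this is a sketch that assumes NumPy is available, which the file itself does not use:

import numpy as np

# (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
rng = np.random.default_rng(0)
A = np.eye(3)                      # keep A trivial so A^(-1) is exact
u = rng.normal(size=(3, 1))
v = rng.normal(size=(3, 1))
ainv = np.linalg.inv(A)
denom = 1.0 + (v.T @ ainv @ u)[0, 0]
sherman_morrison = ainv - (ainv @ u @ v.T @ ainv) / denom
assert np.allclose(sherman_morrison, np.linalg.inv(A + u @ v.T))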
261
0
'''simple docstring''' import argparse import os from . import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPTaConfig, LayoutLMConfig, LxmertConfig, OpenAIGPTConfig, RobertaConfig, TaConfig, TFAlbertForPreTraining, TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPTaLMHeadModel, TFLayoutLMForMaskedLM, TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFTaForConditionalGeneration, TFTransfoXLLMHeadModel, TFWavaVecaModel, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel, TransfoXLConfig, WavaVecaConfig, WavaVecaModel, XLMConfig, XLMRobertaConfig, XLNetConfig, is_torch_available, load_pytorch_checkpoint_in_tfa_model, ) from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging if is_torch_available(): import numpy as np import torch from . import ( AlbertForPreTraining, BartForConditionalGeneration, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, CamembertForMaskedLM, CTRLLMHeadModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DPRContextEncoder, DPRQuestionEncoder, DPRReader, ElectraForPreTraining, FlaubertWithLMHeadModel, GPTaLMHeadModel, LayoutLMForMaskedLM, LxmertForPreTraining, LxmertVisualFeatureEncoder, OpenAIGPTLMHeadModel, RobertaForMaskedLM, RobertaForSequenceClassification, TaForConditionalGeneration, TransfoXLLMHeadModel, XLMRobertaForMaskedLM, XLMWithLMHeadModel, XLNetLMHeadModel, ) logging.set_verbosity_info() lowercase : str = { 'bart': ( BartConfig, TFBartForConditionalGeneration, TFBartForSequenceClassification, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST, ), 'bert': ( BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'bert-base-cased-finetuned-mrpc': ( BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'dpr': ( DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ), 'gpt2': ( GPTaConfig, TFGPTaLMHeadModel, GPTaLMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'xlnet': ( XLNetConfig, TFXLNetLMHeadModel, XLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'xlm': ( XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'xlm-roberta': ( XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'transfo-xl': ( TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'openai-gpt': ( OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'roberta': ( RobertaConfig, TFRobertaForCausalLM, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'layoutlm': ( LayoutLMConfig, TFLayoutLMForMaskedLM, LayoutLMForMaskedLM, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, ), 'roberta-large-mnli': ( RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'camembert': ( CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'flaubert': ( FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'distilbert': ( DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'distilbert-base-distilled-squad': ( DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'lxmert': ( LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'lxmert-visual-feature-encoder': ( LxmertConfig, TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'ctrl': (
CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'albert': ( AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 't5': ( TaConfig, TFTaForConditionalGeneration, TaForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'electra': ( ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'wav2vec2': ( WavaVecaConfig, TFWavaVecaModel, WavaVecaModel, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), } def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=False , snake_case__=True ): '''simple docstring''' if model_type not in MODEL_CLASSES: raise ValueError(F'Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.' ) A, A, A, A : List[Any] = MODEL_CLASSES[model_type] # Initialise TF model if config_file in aws_config_map: A : Any = cached_file(snake_case__ , snake_case__ , force_download=not use_cached_models ) A : List[Any] = config_class.from_json_file(snake_case__ ) A : int = True A : str = True print(F'Building TensorFlow model from configuration: {config}' ) A : List[str] = model_class(snake_case__ ) # Load weights from tf checkpoint if pytorch_checkpoint_path in aws_config_map.keys(): A : Tuple = cached_file( snake_case__ , snake_case__ , force_download=not use_cached_models ) # Load PyTorch checkpoint in tf2 model: A : List[str] = load_pytorch_checkpoint_in_tfa_model(snake_case__ , snake_case__ ) if compare_with_pt_model: A : Dict = tf_model(tf_model.dummy_inputs , training=snake_case__ ) # build the network A : List[Any] = torch.load(snake_case__ , map_location='''cpu''' ) A : Dict = pt_model_class.from_pretrained( pretrained_model_name_or_path=snake_case__ , config=snake_case__ , state_dict=snake_case__ ) with torch.no_grad(): A : Union[str, Any] = pt_model(**pt_model.dummy_inputs ) A : Tuple = pto[0].numpy() A : List[str] = tfo[0].numpy() A : Tuple = np.amax(np.abs(np_pt - np_tf ) ) print(F'Max absolute difference between models outputs {diff}' ) assert diff <= 2E-2, F'Error, model absolute difference is >2e-2: {diff}' # Save pytorch-model print(F'Save TensorFlow model to {tf_dump_path}' ) tf_model.save_weights(snake_case__ , save_format='''h5''' ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=False , ): '''simple docstring''' if args_model_type is None: A : int = list(MODEL_CLASSES.keys() ) else: A : Any = [args_model_type] for j, model_type in enumerate(snake_case__ , start=1 ): print('''=''' * 100 ) print(F' Converting model type {j}/{len(snake_case__ )}: {model_type}' ) print('''=''' * 100 ) if model_type not in MODEL_CLASSES: raise ValueError(F'Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.' 
) A, A, A, A, A : Optional[int] = MODEL_CLASSES[model_type] if model_shortcut_names_or_path is None: A : Any = list(aws_model_maps.keys() ) if config_shortcut_names_or_path is None: A : List[Any] = model_shortcut_names_or_path for i, (model_shortcut_name, config_shortcut_name) in enumerate( zip(snake_case__ , snake_case__ ) , start=1 ): print('''-''' * 100 ) if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: if not only_convert_finetuned_models: print(F' Skipping finetuned checkpoint {model_shortcut_name}' ) continue A : Tuple = model_shortcut_name elif only_convert_finetuned_models: print(F' Skipping not finetuned checkpoint {model_shortcut_name}' ) continue print( F' Converting checkpoint {i}/{len(snake_case__ )}: {model_shortcut_name} - model_type {model_type}' ) print('''-''' * 100 ) if config_shortcut_name in aws_config_map: A : int = cached_file(snake_case__ , snake_case__ , force_download=not use_cached_models ) else: A : Union[str, Any] = config_shortcut_name if model_shortcut_name in aws_model_maps: A : Optional[int] = cached_file(snake_case__ , snake_case__ , force_download=not use_cached_models ) else: A : List[Any] = model_shortcut_name if os.path.isfile(snake_case__ ): A : Any = '''converted_model''' convert_pt_checkpoint_to_tf( model_type=snake_case__ , pytorch_checkpoint_path=snake_case__ , config_file=snake_case__ , tf_dump_path=os.path.join(snake_case__ , model_shortcut_name + '''-tf_model.h5''' ) , compare_with_pt_model=snake_case__ , ) if remove_cached_files: os.remove(snake_case__ ) os.remove(snake_case__ ) if __name__ == "__main__": lowercase : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_dump_path', default=None, type=str, required=True, help='Path to the output Tensorflow dump file.' ) parser.add_argument( '--model_type', default=None, type=str, help=( f'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and ''' 'convert all the models from AWS.' ), ) parser.add_argument( '--pytorch_checkpoint_path', default=None, type=str, help=( 'Path to the PyTorch checkpoint path or shortcut name to download from AWS. ' 'If not given, will download and convert all the checkpoints from AWS.' ), ) parser.add_argument( '--config_file', default=None, type=str, help=( 'The config json file corresponding to the pre-trained model. \n' 'This specifies the model architecture. If not given and ' '--pytorch_checkpoint_path is not given or is a shortcut name ' 'use the configuration associated to the shortcut name on the AWS' ), ) parser.add_argument( '--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.' 
) parser.add_argument( '--use_cached_models', action='store_true', help='Use cached models if possible instead of updating to latest checkpoint versions.', ) parser.add_argument( '--remove_cached_files', action='store_true', help='Remove pytorch models after conversion (save memory when converting in batches).', ) parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.') lowercase : Optional[Any] = parser.parse_args() # if args.pytorch_checkpoint_path is not None: # convert_pt_checkpoint_to_tf(args.model_type.lower(), # args.pytorch_checkpoint_path, # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path, # args.tf_dump_path, # compare_with_pt_model=args.compare_with_pt_model, # use_cached_models=args.use_cached_models) # else: convert_all_pt_checkpoints_to_tf( args.model_type.lower() if args.model_type is not None else None, args.tf_dump_path, model_shortcut_names_or_path=[args.pytorch_checkpoint_path] if args.pytorch_checkpoint_path is not None else None, config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, compare_with_pt_model=args.compare_with_pt_model, use_cached_models=args.use_cached_models, remove_cached_files=args.remove_cached_files, only_convert_finetuned_models=args.only_convert_finetuned_models, )
3
"""simple docstring""" import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def _lowerCamelCase( a , a , a , a , a=True , a="pt" ): __a = {"add_prefix_space": True} if isinstance(a , a ) and not line.startswith(" " ) else {} __a = padding_side return tokenizer( [line] , max_length=a , padding="max_length" if pad_to_max_length else None , truncation=a , return_tensors=a , add_special_tokens=a , **a , ) def _lowerCamelCase( a , a , a=None , ): __a = input_ids.ne(a ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class snake_case__ ( snake_case_ ): def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase="train" , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="" , ): super().__init__() __a = Path(lowerCamelCase ).joinpath(type_path + ".source" ) __a = Path(lowerCamelCase ).joinpath(type_path + ".target" ) __a = self.get_char_lens(self.src_file ) __a = max_source_length __a = max_target_length assert min(self.src_lens ) > 0, F"found empty line in {self.src_file}" __a = tokenizer __a = prefix if n_obs is not None: __a = self.src_lens[:n_obs] __a = src_lang __a = tgt_lang def __len__( self ): return len(self.src_lens ) def __getitem__( self , lowerCamelCase ): __a = index + 1 # linecache starts at 1 __a = self.prefix + linecache.getline(str(self.src_file ) , lowerCamelCase ).rstrip("\n" ) __a = linecache.getline(str(self.tgt_file ) , lowerCamelCase ).rstrip("\n" ) assert source_line, F"empty source line for index {index}" assert tgt_line, F"empty tgt line for index {index}" # Need to add eos token manually for T5 if isinstance(self.tokenizer , lowerCamelCase ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right __a = ( self.tokenizer.question_encoder if isinstance(self.tokenizer , lowerCamelCase ) else self.tokenizer ) __a = self.tokenizer.generator if isinstance(self.tokenizer , lowerCamelCase ) else self.tokenizer __a = encode_line(lowerCamelCase , lowerCamelCase , self.max_source_length , "right" ) __a = encode_line(lowerCamelCase , lowerCamelCase , self.max_target_length , "right" ) __a = source_inputs["input_ids"].squeeze() __a = target_inputs["input_ids"].squeeze() __a = source_inputs["attention_mask"].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def a__ ( lowerCamelCase ): return [len(lowerCamelCase ) for x in Path(lowerCamelCase ).open().readlines()] def a__ ( self , lowerCamelCase ): __a = torch.stack([x["input_ids"] for x in batch] ) __a = torch.stack([x["attention_mask"] for x in batch] ) __a = torch.stack([x["decoder_input_ids"] for x in batch] ) __a = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer , lowerCamelCase ) else self.tokenizer.pad_token_id ) __a = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer , lowerCamelCase ) else self.tokenizer.pad_token_id ) __a = trim_batch(lowerCamelCase , lowerCamelCase ) __a , __a = trim_batch(lowerCamelCase , lowerCamelCase , attention_mask=lowerCamelCase ) 
__a = { "input_ids": source_ids, "attention_mask": source_mask, "decoder_input_ids": y, } return batch SCREAMING_SNAKE_CASE__:Tuple = getLogger(__name__) def _lowerCamelCase( a ): return list(itertools.chain.from_iterable(a ) ) def _lowerCamelCase( a ): __a = get_git_info() save_json(a , os.path.join(a , "git_log.json" ) ) def _lowerCamelCase( a , a , a=4 , **a ): with open(a , "w" ) as f: json.dump(a , a , indent=a , **a ) def _lowerCamelCase( a ): with open(a ) as f: return json.load(a ) def _lowerCamelCase( ): __a = git.Repo(search_parent_directories=a ) __a = { "repo_id": str(a ), "repo_sha": str(repo.head.object.hexsha ), "repo_branch": str(repo.active_branch ), "hostname": str(socket.gethostname() ), } return repo_infos def _lowerCamelCase( a , a ): return list(map(a , a ) ) def _lowerCamelCase( a , a ): with open(a , "wb" ) as f: return pickle.dump(a , a ) def _lowerCamelCase( a ): def remove_articles(a ): return re.sub(R"\b(a|an|the)\b" , " " , a ) def white_space_fix(a ): return " ".join(text.split() ) def remove_punc(a ): __a = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(a ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(a ) ) ) ) def _lowerCamelCase( a , a ): __a = normalize_answer(a ).split() __a = normalize_answer(a ).split() __a = Counter(a ) & Counter(a ) __a = sum(common.values() ) if num_same == 0: return 0 __a = 1.0 * num_same / len(a ) __a = 1.0 * num_same / len(a ) __a = (2 * precision * recall) / (precision + recall) return fa def _lowerCamelCase( a , a ): return normalize_answer(a ) == normalize_answer(a ) def _lowerCamelCase( a , a ): assert len(a ) == len(a ) __a = 0 for hypo, pred in zip(a , a ): em += exact_match_score(a , a ) if len(a ) > 0: em /= len(a ) return {"em": em} def _lowerCamelCase( a ): return model_prefix.startswith("rag" ) def _lowerCamelCase( a , a , a ): __a = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead __a = "dropout_rate" for p in extra_params: if getattr(a , a , a ): if not hasattr(a , a ) and not hasattr(a , equivalent_param[p] ): logger.info("config doesn't have a `{}` attribute".format(a ) ) delattr(a , a ) continue __a = p if hasattr(a , a ) else equivalent_param[p] setattr(a , a , getattr(a , a ) ) delattr(a , a ) return hparams, config
261
0
'''simple docstring''' import fire from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoTokenizer from utils import SeqaSeqDataset, pickle_save def a_ ( lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : Optional[Any]=1024 , lowerCamelCase : Tuple=1024 , lowerCamelCase : Tuple=False , **lowerCamelCase : Any ): lowerCAmelCase = AutoTokenizer.from_pretrained(lowerCamelCase ) lowerCAmelCase = SeqaSeqDataset(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , type_path='train' , **lowerCamelCase ) lowerCAmelCase = tok.pad_token_id def get_lens(lowerCamelCase : List[Any] ): lowerCAmelCase = tqdm( DataLoader(lowerCamelCase , batch_size=512 , num_workers=8 , shuffle=lowerCamelCase , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , ) lowerCAmelCase = [] for batch in dl: lowerCAmelCase = batch['input_ids'].ne(lowerCamelCase ).sum(1 ).tolist() lowerCAmelCase = batch['labels'].ne(lowerCamelCase ).sum(1 ).tolist() if consider_target: for src, tgt in zip(lowerCamelCase , lowerCamelCase ): max_lens.append(max(lowerCamelCase , lowerCamelCase ) ) else: max_lens.extend(lowerCamelCase ) return max_lens lowerCAmelCase = get_lens(lowerCamelCase ) lowerCAmelCase = SeqaSeqDataset(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , type_path='val' , **lowerCamelCase ) lowerCAmelCase = get_lens(lowerCamelCase ) pickle_save(lowerCamelCase , train_ds.len_file ) pickle_save(lowerCamelCase , val_ds.len_file ) if __name__ == "__main__": fire.Fire(save_len_file)
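The per-example length rule used in get_lens above boils down to counting non-pad tokens per row; a minimal illustration with a hypothetical pad id of 0:

import torch

pad_token_id = 0  # hypothetical; the script takes it from the tokenizer
input_ids = torch.tensor([[5, 6, 7, 0, 0], [5, 6, 0, 0, 0]])
print(input_ids.ne(pad_token_id).sum(1).tolist())  # [3, 2]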
4
"""simple docstring""" from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .attention_processor import AttentionProcessor, AttnProcessor from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder @dataclass class snake_case__ ( snake_case_ ): _snake_case : "DiagonalGaussianDistribution" class snake_case__ ( snake_case_, snake_case_ ): _snake_case : Optional[Any] = True @register_to_config def __init__( self , lowerCamelCase = 3 , lowerCamelCase = 3 , lowerCamelCase = ("DownEncoderBlock2D",) , lowerCamelCase = ("UpDecoderBlock2D",) , lowerCamelCase = (64,) , lowerCamelCase = 1 , lowerCamelCase = "silu" , lowerCamelCase = 4 , lowerCamelCase = 32 , lowerCamelCase = 32 , lowerCamelCase = 0.1_8215 , ): super().__init__() # pass init params to Encoder __a = Encoder( in_channels=lowerCamelCase , out_channels=lowerCamelCase , down_block_types=lowerCamelCase , block_out_channels=lowerCamelCase , layers_per_block=lowerCamelCase , act_fn=lowerCamelCase , norm_num_groups=lowerCamelCase , double_z=lowerCamelCase , ) # pass init params to Decoder __a = Decoder( in_channels=lowerCamelCase , out_channels=lowerCamelCase , up_block_types=lowerCamelCase , block_out_channels=lowerCamelCase , layers_per_block=lowerCamelCase , norm_num_groups=lowerCamelCase , act_fn=lowerCamelCase , ) __a = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 ) __a = nn.Convad(lowerCamelCase , lowerCamelCase , 1 ) __a = False __a = False # only relevant if vae tiling is enabled __a = self.config.sample_size __a = ( self.config.sample_size[0] if isinstance(self.config.sample_size , (list, tuple) ) else self.config.sample_size ) __a = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) ) __a = 0.25 def a__ ( self , lowerCamelCase , lowerCamelCase=False ): if isinstance(lowerCamelCase , (Encoder, Decoder) ): __a = value def a__ ( self , lowerCamelCase = True ): __a = use_tiling def a__ ( self ): self.enable_tiling(lowerCamelCase ) def a__ ( self ): __a = True def a__ ( self ): __a = False @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def a__ ( self ): __a = {} def fn_recursive_add_processors(lowerCamelCase , lowerCamelCase , lowerCamelCase ): if hasattr(lowerCamelCase , "set_processor" ): __a = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(F"{name}.{sub_name}" , lowerCamelCase , lowerCamelCase ) return processors for name, module in self.named_children(): fn_recursive_add_processors(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return processors def a__ ( self , lowerCamelCase ): __a = len(self.attn_processors.keys() ) if isinstance(lowerCamelCase , lowerCamelCase ) and len(lowerCamelCase ) != count: raise ValueError( F"A dict of processors was passed, but the number of processors {len(lowerCamelCase )} does not match the" F" number of attention layers: {count}. Please make sure to pass {count} processor classes." 
) def fn_recursive_attn_processor(lowerCamelCase , lowerCamelCase , lowerCamelCase ): if hasattr(lowerCamelCase , "set_processor" ): if not isinstance(lowerCamelCase , lowerCamelCase ): module.set_processor(lowerCamelCase ) else: module.set_processor(processor.pop(F"{name}.processor" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(F"{name}.{sub_name}" , lowerCamelCase , lowerCamelCase ) for name, module in self.named_children(): fn_recursive_attn_processor(lowerCamelCase , lowerCamelCase , lowerCamelCase ) def a__ ( self ): self.set_attn_processor(AttnProcessor() ) @apply_forward_hook def a__ ( self , lowerCamelCase , lowerCamelCase = True ): if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): return self.tiled_encode(lowerCamelCase , return_dict=lowerCamelCase ) if self.use_slicing and x.shape[0] > 1: __a = [self.encoder(lowerCamelCase ) for x_slice in x.split(1 )] __a = torch.cat(lowerCamelCase ) else: __a = self.encoder(lowerCamelCase ) __a = self.quant_conv(lowerCamelCase ) __a = DiagonalGaussianDistribution(lowerCamelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase = True ): if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): return self.tiled_decode(lowerCamelCase , return_dict=lowerCamelCase ) __a = self.post_quant_conv(lowerCamelCase ) __a = self.decoder(lowerCamelCase ) if not return_dict: return (dec,) return DecoderOutput(sample=lowerCamelCase ) @apply_forward_hook def a__ ( self , lowerCamelCase , lowerCamelCase = True ): if self.use_slicing and z.shape[0] > 1: __a = [self._decode(lowerCamelCase ).sample for z_slice in z.split(1 )] __a = torch.cat(lowerCamelCase ) else: __a = self._decode(lowerCamelCase ).sample if not return_dict: return (decoded,) return DecoderOutput(sample=lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): __a = min(a.shape[2] , b.shape[2] , lowerCamelCase ) for y in range(lowerCamelCase ): __a = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): __a = min(a.shape[3] , b.shape[3] , lowerCamelCase ) for x in range(lowerCamelCase ): __a = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def a__ ( self , lowerCamelCase , lowerCamelCase = True ): __a = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) ) __a = int(self.tile_latent_min_size * self.tile_overlap_factor ) __a = self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and encode them separately. 
__a = [] for i in range(0 , x.shape[2] , lowerCamelCase ): __a = [] for j in range(0 , x.shape[3] , lowerCamelCase ): __a = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] __a = self.encoder(lowerCamelCase ) __a = self.quant_conv(lowerCamelCase ) row.append(lowerCamelCase ) rows.append(lowerCamelCase ) __a = [] for i, row in enumerate(lowerCamelCase ): __a = [] for j, tile in enumerate(lowerCamelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __a = self.blend_v(rows[i - 1][j] , lowerCamelCase , lowerCamelCase ) if j > 0: __a = self.blend_h(row[j - 1] , lowerCamelCase , lowerCamelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(lowerCamelCase , dim=3 ) ) __a = torch.cat(lowerCamelCase , dim=2 ) __a = DiagonalGaussianDistribution(lowerCamelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase = True ): __a = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) ) __a = int(self.tile_sample_min_size * self.tile_overlap_factor ) __a = self.tile_sample_min_size - blend_extent # Split z into overlapping 64x64 tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. __a = [] for i in range(0 , z.shape[2] , lowerCamelCase ): __a = [] for j in range(0 , z.shape[3] , lowerCamelCase ): __a = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] __a = self.post_quant_conv(lowerCamelCase ) __a = self.decoder(lowerCamelCase ) row.append(lowerCamelCase ) rows.append(lowerCamelCase ) __a = [] for i, row in enumerate(lowerCamelCase ): __a = [] for j, tile in enumerate(lowerCamelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __a = self.blend_v(rows[i - 1][j] , lowerCamelCase , lowerCamelCase ) if j > 0: __a = self.blend_h(row[j - 1] , lowerCamelCase , lowerCamelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(lowerCamelCase , dim=3 ) ) __a = torch.cat(lowerCamelCase , dim=2 ) if not return_dict: return (dec,) return DecoderOutput(sample=lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = True , lowerCamelCase = None , ): __a = sample __a = self.encode(lowerCamelCase ).latent_dist if sample_posterior: __a = posterior.sample(generator=lowerCamelCase ) else: __a = posterior.mode() __a = self.decode(lowerCamelCase ).sample if not return_dict: return (dec,) return DecoderOutput(sample=lowerCamelCase )
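A toy NumPy restatement of the blend_h weighting above: across blend_extent columns the left tile fades out linearly while the right tile fades in, which is what hides the tile seams. Illustrative only, not the pipeline's API:

import numpy as np

def blend_h(a: np.ndarray, b: np.ndarray, blend_extent: int) -> np.ndarray:
    # Linear cross-fade over the overlapping columns, mirroring blend_h above.
    blend_extent = min(a.shape[-1], b.shape[-1], blend_extent)
    out = b.copy()
    for x in range(blend_extent):
        w = x / blend_extent
        out[..., x] = a[..., -blend_extent + x] * (1 - w) + b[..., x] * w
    return out

left = np.zeros((1, 1, 4, 8))   # toy NCHW tiles, not real latents
right = np.ones((1, 1, 4, 8))
print(blend_h(left, right, 4)[0, 0, 0])  # [0. 0.25 0.5 0.75 1. 1. 1. 1.]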
261
0
import math from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { '''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''', # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio } class lowerCamelCase__ ( lowerCAmelCase): SCREAMING_SNAKE_CASE__ = '''data2vec-audio''' def __init__(self , UpperCAmelCase=3_2 , UpperCAmelCase=7_6_8 , UpperCAmelCase=1_2 , UpperCAmelCase=1_2 , UpperCAmelCase=3_0_7_2 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-5 , UpperCAmelCase="gelu" , UpperCAmelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase=(1_0, 3, 3, 3, 3, 2, 2) , UpperCAmelCase=False , UpperCAmelCase=1_6 , UpperCAmelCase=1_9 , UpperCAmelCase=5 , UpperCAmelCase=0.05 , UpperCAmelCase=1_0 , UpperCAmelCase=2 , UpperCAmelCase=0.0 , UpperCAmelCase=1_0 , UpperCAmelCase=0 , UpperCAmelCase="sum" , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=2_5_6 , UpperCAmelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , UpperCAmelCase=(5, 3, 3, 1, 1) , UpperCAmelCase=(1, 2, 3, 1, 1) , UpperCAmelCase=5_1_2 , UpperCAmelCase=0 , UpperCAmelCase=1 , UpperCAmelCase=2 , UpperCAmelCase=False , UpperCAmelCase=3 , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=None , **UpperCAmelCase , ) -> List[Any]: super().__init__(**UpperCAmelCase , pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase ) _lowercase =hidden_size _lowercase =feat_extract_activation _lowercase =list(UpperCAmelCase ) _lowercase =list(UpperCAmelCase ) _lowercase =list(UpperCAmelCase ) _lowercase =conv_bias _lowercase =num_conv_pos_embeddings _lowercase =num_conv_pos_embedding_groups _lowercase =conv_pos_kernel_size _lowercase =len(self.conv_dim ) _lowercase =num_hidden_layers _lowercase =intermediate_size _lowercase =hidden_act _lowercase =num_attention_heads _lowercase =hidden_dropout _lowercase =attention_dropout _lowercase =activation_dropout _lowercase =feat_proj_dropout _lowercase =final_dropout _lowercase =layerdrop _lowercase =layer_norm_eps _lowercase =initializer_range _lowercase =vocab_size _lowercase =use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`," f" `len(config.conv_kernel) = {len(self.conv_kernel )}`." ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _lowercase =mask_time_prob _lowercase =mask_time_length _lowercase =mask_time_min_masks _lowercase =mask_feature_prob _lowercase =mask_feature_length _lowercase =mask_feature_min_masks # ctc loss _lowercase =ctc_loss_reduction _lowercase =ctc_zero_infinity # adapter _lowercase =add_adapter _lowercase =adapter_kernel_size _lowercase =adapter_stride _lowercase =num_adapter_layers _lowercase =output_hidden_size or hidden_size # SequenceClassification-specific parameter. 
# Feel free to ignore for other classes. _lowercase =classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. _lowercase =list(UpperCAmelCase ) _lowercase =list(UpperCAmelCase ) _lowercase =list(UpperCAmelCase ) _lowercase =xvector_output_dim @property def __A (self ) -> int: return math.prod(self.conv_stride )
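The final property is the encoder's total temporal downsampling factor; with the default strides it works out as follows (a back-of-the-envelope check, assuming 16 kHz input audio):

import math

strides = (5, 2, 2, 2, 2, 2, 2)   # the default conv_stride above
ratio = math.prod(strides)
print(ratio)           # 320 input samples per output frame
print(16_000 / ratio)  # 50.0 frames per second at 16 kHz input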
5
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy SCREAMING_SNAKE_CASE__:List[Any] = logging.get_logger(__name__) class snake_case__ ( snake_case_ ): def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ): __a = feature_size __a = sampling_rate __a = padding_value __a = kwargs.pop("padding_side" , "right" ) __a = kwargs.pop("return_attention_mask" , lowerCamelCase ) super().__init__(**lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , ): # If we have a list of dicts, let's convert it in a dict of lists # We do this to allow using this method as a collate_fn function in PyTorch Dataloader if isinstance(lowerCamelCase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ): __a = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`" F" to this method that includes {self.model_input_names[0]}, but you provided" F" {list(processed_features.keys() )}" ) __a = processed_features[self.model_input_names[0]] __a = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(lowerCamelCase ) == 0: if return_attention_mask: __a = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch __a = required_input[0] if isinstance(lowerCamelCase , (list, tuple) ): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. __a = 0 while len(required_input[index] ) == 0: index += 1 if index < len(lowerCamelCase ): __a = required_input[index][0] if return_tensors is None: if is_tf_tensor(lowerCamelCase ): __a = "tf" elif is_torch_tensor(lowerCamelCase ): __a = "pt" elif isinstance(lowerCamelCase , (int, float, list, tuple, np.ndarray) ): __a = "np" else: raise ValueError( F"type of {first_element} unknown: {type(lowerCamelCase )}. " "Should be one of a python, numpy, pytorch or tensorflow object." ) for key, value in processed_features.items(): if isinstance(value[0] , (int, float) ): __a = to_numpy(lowerCamelCase ) else: __a = [to_numpy(lowerCamelCase ) for v in value] # Convert padding_strategy in PaddingStrategy __a = self._get_padding_strategies(padding=lowerCamelCase , max_length=lowerCamelCase ) __a = processed_features[self.model_input_names[0]] __a = len(lowerCamelCase ) if not all(len(lowerCamelCase ) == batch_size for v in processed_features.values() ): raise ValueError("Some items in the output dictionary have a different batch size than others." 
) __a = [] for i in range(lowerCamelCase ): __a = {k: v[i] for k, v in processed_features.items()} # truncation __a = self._truncate( lowerCamelCase , max_length=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , truncation=lowerCamelCase , ) truncated_inputs.append(lowerCamelCase ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length __a = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) __a = PaddingStrategy.MAX_LENGTH __a = {} for i in range(lowerCamelCase ): # padding __a = self._pad( truncated_inputs[i] , max_length=lowerCamelCase , padding_strategy=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , ) for key, value in outputs.items(): if key not in batch_outputs: __a = [] if value.dtype is np.dtype(np.floataa ): __a = value.astype(np.floataa ) batch_outputs[key].append(lowerCamelCase ) return BatchFeature(lowerCamelCase , tensor_type=lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = PaddingStrategy.DO_NOT_PAD , lowerCamelCase = None , lowerCamelCase = None , ): __a = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: __a = len(lowerCamelCase ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): __a = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of __a = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowerCamelCase ) < max_length if return_attention_mask and "attention_mask" not in processed_features: __a = np.ones(len(lowerCamelCase ) , dtype=np.intaa ) if needs_to_be_padded: __a = max_length - len(lowerCamelCase ) if self.padding_side == "right": if return_attention_mask: __a = np.pad( processed_features["attention_mask"] , (0, difference) ) __a = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) __a = np.pad( lowerCamelCase , lowerCamelCase , "constant" , constant_values=self.padding_value ) elif self.padding_side == "left": if return_attention_mask: __a = np.pad( processed_features["attention_mask"] , (difference, 0) ) __a = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) __a = np.pad( lowerCamelCase , lowerCamelCase , "constant" , constant_values=self.padding_value ) else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return processed_features def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , ): if not truncation: return processed_features elif truncation and max_length is None: raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." 
) __a = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): __a = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of __a = len(lowerCamelCase ) > max_length if needs_to_be_truncated: __a = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: __a = processed_features["attention_mask"][:max_length] return processed_features def a__ ( self , lowerCamelCase=False , lowerCamelCase=None ): # Get padding strategy if padding is not False: if padding is True: __a = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(lowerCamelCase , lowerCamelCase ): __a = PaddingStrategy(lowerCamelCase ) elif isinstance(lowerCamelCase , lowerCamelCase ): __a = padding else: __a = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( F"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined" ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use" " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." ) return padding_strategy
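What the right-padding branch above produces for a one-dimensional (feature_size == 1) input that is two frames short of max_length; a standalone NumPy sketch, not the BatchFeature API:

import numpy as np

required_input = np.array([0.1, 0.2, 0.3], dtype=np.float32)
max_length, padding_value = 5, 0.0   # hypothetical values for illustration
difference = max_length - len(required_input)
attention_mask = np.pad(np.ones(len(required_input), dtype=np.int32), (0, difference))
padded = np.pad(required_input, (0, difference), "constant", constant_values=padding_value)
print(padded)          # [0.1 0.2 0.3 0.  0. ]
print(attention_mask)  # [1 1 1 0 0]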
261
0
from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, ) @flax.struct.dataclass class __A( a ): snake_case_ = 42 snake_case_ = 42 class __A( nn.Module ): snake_case_ = 42 snake_case_ = (1_6, 3_2, 9_6, 2_5_6) snake_case_ = jnp.floataa def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' __a = nn.Conv( self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) __a = [] for i in range(len(self.block_out_channels ) - 1 ): __a = self.block_out_channels[i] __a = self.block_out_channels[i + 1] __a = nn.Conv( _snake_case , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) blocks.append(_snake_case ) __a = nn.Conv( _snake_case , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) blocks.append(_snake_case ) __a = blocks __a = nn.Conv( self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) def __call__( self , _snake_case ) -> int: '''simple docstring''' __a = self.conv_in(_snake_case ) __a = nn.silu(_snake_case ) for block in self.blocks: __a = block(_snake_case ) __a = nn.silu(_snake_case ) __a = self.conv_out(_snake_case ) return embedding @flax_register_to_config class __A( nn.Module , a , a ): snake_case_ = 3_2 snake_case_ = 4 snake_case_ = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) snake_case_ = False snake_case_ = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0) snake_case_ = 2 snake_case_ = 8 snake_case_ = None snake_case_ = 1_2_8_0 snake_case_ = 0.0 snake_case_ = False snake_case_ = jnp.floataa snake_case_ = True snake_case_ = 0 snake_case_ = "rgb" snake_case_ = (1_6, 3_2, 9_6, 2_5_6) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> FrozenDict: '''simple docstring''' __a = (1, self.in_channels, self.sample_size, self.sample_size) __a = jnp.zeros(_snake_case , dtype=jnp.floataa ) __a = jnp.ones((1,) , dtype=jnp.intaa ) __a = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) __a = (1, 3, self.sample_size * 8, self.sample_size * 8) __a = jnp.zeros(_snake_case , dtype=jnp.floataa ) __a , __a = jax.random.split(_snake_case ) __a = {'''params''': params_rng, '''dropout''': dropout_rng} return self.init(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )["params"] def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]: '''simple docstring''' __a = self.block_out_channels __a = block_out_channels[0] * 4 # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. 
# The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. __a = self.num_attention_heads or self.attention_head_dim # input __a = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time __a = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) __a = FlaxTimestepEmbedding(_snake_case , dtype=self.dtype ) __a = FlaxControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , ) __a = self.only_cross_attention if isinstance(_snake_case , _snake_case ): __a = (only_cross_attention,) * len(self.down_block_types ) if isinstance(_snake_case , _snake_case ): __a = (num_attention_heads,) * len(self.down_block_types ) # down __a = [] __a = [] __a = block_out_channels[0] __a = nn.Conv( _snake_case , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(_snake_case ) for i, down_block_type in enumerate(self.down_block_types ): __a = output_channel __a = block_out_channels[i] __a = i == len(_snake_case ) - 1 if down_block_type == "CrossAttnDownBlock2D": __a = FlaxCrossAttnDownBlockaD( in_channels=_snake_case , out_channels=_snake_case , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , ) else: __a = FlaxDownBlockaD( in_channels=_snake_case , out_channels=_snake_case , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(_snake_case ) for _ in range(self.layers_per_block ): __a = nn.Conv( _snake_case , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(_snake_case ) if not is_final_block: __a = nn.Conv( _snake_case , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(_snake_case ) __a = down_blocks __a = controlnet_down_blocks # mid __a = block_out_channels[-1] __a = FlaxUNetMidBlockaDCrossAttn( in_channels=_snake_case , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , ) __a = nn.Conv( _snake_case , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) def __call__( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case = 1.0 , _snake_case = True , _snake_case = False , ) -> Union[FlaxControlNetOutput, Tuple]: '''simple docstring''' __a = self.controlnet_conditioning_channel_order if channel_order == "bgr": __a = jnp.flip(_snake_case , axis=1 ) # 1. time if not isinstance(_snake_case , jnp.ndarray ): __a = jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(_snake_case , jnp.ndarray ) and len(timesteps.shape ) == 0: __a = timesteps.astype(dtype=jnp.floataa ) __a = jnp.expand_dims(_snake_case , 0 ) __a = self.time_proj(_snake_case ) __a = self.time_embedding(_snake_case ) # 2. pre-process __a = jnp.transpose(_snake_case , (0, 2, 3, 1) ) __a = self.conv_in(_snake_case ) __a = jnp.transpose(_snake_case , (0, 2, 3, 1) ) __a = self.controlnet_cond_embedding(_snake_case ) sample += controlnet_cond # 3. down __a = (sample,) for down_block in self.down_blocks: if isinstance(_snake_case , _snake_case ): __a , __a = down_block(_snake_case , _snake_case , _snake_case , deterministic=not train ) else: __a , __a = down_block(_snake_case , _snake_case , deterministic=not train ) down_block_res_samples += res_samples # 4. mid __a = self.mid_block(_snake_case , _snake_case , _snake_case , deterministic=not train ) # 5. controlnet blocks __a = () for down_block_res_sample, controlnet_block in zip(_snake_case , self.controlnet_down_blocks ): __a = controlnet_block(_snake_case ) controlnet_down_block_res_samples += (down_block_res_sample,) __a = controlnet_down_block_res_samples __a = self.controlnet_mid_block(_snake_case ) # 6. scaling __a = [sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample *= conditioning_scale if not return_dict: return (down_block_res_samples, mid_block_res_sample) return FlaxControlNetOutput( down_block_res_samples=_snake_case , mid_block_res_sample=_snake_case )
6
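# A minimal, illustrative sketch (plain NumPy, not the pipeline API) of step 6
# above: every ControlNet down/mid residual is multiplied by a single scalar
# `conditioning_scale` before being handed to the UNet.
import numpy as np

def scale_controlnet_residuals(down_block_res_samples, mid_block_res_sample, conditioning_scale=1.0):
    down = [sample * conditioning_scale for sample in down_block_res_samples]
    mid = mid_block_res_sample * conditioning_scale
    return down, mid

down, mid = scale_controlnet_residuals([np.ones((1, 8, 8, 4))] * 3, np.ones((1, 4, 4, 4)), 0.5)
assert float(down[0].max()) == 0.5 and float(mid.max()) == 0.5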
"""simple docstring""" from collections import Counter from timeit import timeit def _lowerCamelCase( a = "" , ): return sum(c % 2 for c in Counter(input_str.replace(" " , "" ).lower() ).values() ) < 2 def _lowerCamelCase( a = "" ): if len(a ) == 0: return True __a = input_str.replace(" " , "" ).lower() # character_freq_dict: Stores the frequency of every character in the input string __a = {} for character in lower_case_input_str: __a = character_freq_dict.get(a , 0 ) + 1 __a = 0 for character_count in character_freq_dict.values(): if character_count % 2: odd_char += 1 if odd_char > 1: return False return True def _lowerCamelCase( a = "" ): print("\nFor string = " , a , ":" ) print( "> can_string_be_rearranged_as_palindrome_counter()" , "\tans =" , can_string_be_rearranged_as_palindrome_counter(a ) , "\ttime =" , timeit( "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , ) print( "> can_string_be_rearranged_as_palindrome()" , "\tans =" , can_string_be_rearranged_as_palindrome(a ) , "\ttime =" , timeit( "z.can_string_be_rearranged_as_palindrome(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__:Tuple = input( """Enter string to determine if it can be rearranged as a palindrome or not: """ ).strip() benchmark(check_str) SCREAMING_SNAKE_CASE__:Dict = can_string_be_rearranged_as_palindrome_counter(check_str) print(F'''{check_str} can {'' if status else 'not '}be rearranged as a palindrome''')
261
0
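# Quick usage sketch of the Counter-based check above: a string can be
# rearranged into a palindrome iff at most one character has an odd count.
from collections import Counter

def can_rearrange(s: str) -> bool:
    return sum(c % 2 for c in Counter(s.replace(" ", "").lower()).values()) < 2

assert can_rearrange("Taco cat")        # "tacocat": only "o" has an odd count
assert not can_rearrange("palindrome")  # ten characters, each with an odd count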
from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = { "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json", } class OpenLlamaConfig( PretrainedConfig ): """simple docstring""" model_type = 'open-llama' def __init__( self, vocab_size=1_0_0_0_0_0, hidden_size=4_0_9_6, intermediate_size=1_1_0_0_8, num_hidden_layers=3_2, num_attention_heads=3_2, hidden_act="silu", max_position_embeddings=2_0_4_8, initializer_range=0.02, rms_norm_eps=1E-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, use_memory_efficient_attention=True, hidden_dropout_prob=0.1, attention_dropout_prob=0.1, use_stable_embedding=True, shared_input_output_embedding=True, rope_scaling=None, **kwargs, ): '''simple docstring''' self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.use_memory_efficient_attention = kwargs.pop( 'use_memorry_efficient_attention', use_memory_efficient_attention ) self.hidden_dropout_prob = hidden_dropout_prob self.attention_dropout_prob = attention_dropout_prob self.use_stable_embedding = use_stable_embedding self.shared_input_output_embedding = shared_input_output_embedding self.rope_scaling = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, ) def _rope_scaling_validation( self ): '''simple docstring''' if self.rope_scaling is None: return if not isinstance(self.rope_scaling, dict ) or len(self.rope_scaling ) != 2: raise ValueError( '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, ' F'got {self.rope_scaling}' ) rope_scaling_type = self.rope_scaling.get('type', None ) rope_scaling_factor = self.rope_scaling.get('factor', None ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' ) if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float ) or rope_scaling_factor <= 1.0: raise ValueError(F'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
7
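# A standalone sketch of the `_rope_scaling_validation` logic above: the dict
# must carry a known `type` and a float `factor` greater than 1.
def validate_rope_scaling(rope_scaling):
    if rope_scaling is None:
        return
    if not isinstance(rope_scaling, dict) or len(rope_scaling) != 2:
        raise ValueError(f"`rope_scaling` must be a dictionary with two fields, `type` and `factor`, got {rope_scaling}")
    rope_type, factor = rope_scaling.get("type"), rope_scaling.get("factor")
    if rope_type not in ("linear", "dynamic"):
        raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_type}")
    if not isinstance(factor, float) or factor <= 1.0:
        raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {factor}")

validate_rope_scaling({"type": "linear", "factor": 2.0})  # passes silently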
"""simple docstring""" import itertools import random import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin SCREAMING_SNAKE_CASE__:Any = random.Random() if is_torch_available(): import torch def _lowerCamelCase( a , a=1.0 , a=None , a=None ): if rng is None: __a = global_rng __a = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class snake_case__ ( unittest.TestCase ): def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=400 , lowerCamelCase=2000 , lowerCamelCase=1 , lowerCamelCase=0.0 , lowerCamelCase=16000 , lowerCamelCase=True , lowerCamelCase=True , ): __a = parent __a = batch_size __a = min_seq_length __a = max_seq_length __a = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) __a = feature_size __a = padding_value __a = sampling_rate __a = return_attention_mask __a = do_normalize def a__ ( self ): return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def a__ ( self , lowerCamelCase=False , lowerCamelCase=False ): def _flatten(lowerCamelCase ): return list(itertools.chain(*lowerCamelCase ) ) if equal_length: __a = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size __a = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: __a = [np.asarray(lowerCamelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class snake_case__ ( snake_case_, unittest.TestCase ): _snake_case : str = ASTFeatureExtractor def a__ ( self ): __a = ASTFeatureExtractionTester(self ) def a__ ( self ): # Tests that all call wrap to encode_plus and batch_encode_plus __a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 __a = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __a = [np.asarray(lowerCamelCase ) for speech_input in speech_inputs] # Test not batched input __a = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values __a = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) ) # Test batched __a = feat_extract(lowerCamelCase , padding=lowerCamelCase , return_tensors="np" ).input_values __a = feat_extract(lowerCamelCase , padding=lowerCamelCase , return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ): self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. 
__a = [floats_list((1, x) )[0] for x in (800, 800, 800)] __a = np.asarray(lowerCamelCase ) __a = feat_extract(lowerCamelCase , return_tensors="np" ).input_values __a = feat_extract(lowerCamelCase , return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ): self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) ) @require_torch def a__ ( self ): import torch __a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __a = np.random.rand(100 ).astype(np.floataa ) __a = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: __a = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) __a = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def a__ ( self , lowerCamelCase ): from datasets import load_dataset __a = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" ) # automatic decoding with librispeech __a = ds.sort("id" ).select(range(lowerCamelCase ) )[:num_samples]["audio"] return [x["array"] for x in speech_samples] @require_torch def a__ ( self ): # fmt: off __a = torch.tensor( [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776, -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133, -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936, -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] ) # fmt: on __a = self._load_datasamples(1 ) __a = ASTFeatureExtractor() __a = feature_extractor(lowerCamelCase , return_tensors="pt" ).input_values self.assertEqual(input_values.shape , (1, 1024, 128) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCamelCase , atol=1E-4 ) )
261
0
import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) lowerCAmelCase_ = '''hf-internal-testing/tiny-random-bert''' lowerCAmelCase_ = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''') lowerCAmelCase_ = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6''' class snake_case_ ( unittest.TestCase ): '''simple docstring''' def snake_case__( self : Any ) ->List[Any]: snake_case_ = cached_file(_UpperCamelCase , _UpperCamelCase ) # Should have downloaded the file in here self.assertTrue(os.path.isdir(_UpperCamelCase ) ) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(_UpperCamelCase , _UpperCamelCase ) ) ) with open(os.path.join(_UpperCamelCase , '''refs''' , '''main''' ) ) as f: snake_case_ = f.read() self.assertEqual(_UpperCamelCase , os.path.join(_UpperCamelCase , '''snapshots''' , _UpperCamelCase , _UpperCamelCase ) ) self.assertTrue(os.path.isfile(_UpperCamelCase ) ) # File is cached at the same place the second time. snake_case_ = cached_file(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) # Using a specific revision to test the full commit hash. snake_case_ = cached_file(_UpperCamelCase , _UpperCamelCase , revision='''9b8c223''' ) self.assertEqual(_UpperCamelCase , os.path.join(_UpperCamelCase , '''snapshots''' , _UpperCamelCase , _UpperCamelCase ) ) def snake_case__( self : Tuple ) ->Optional[int]: with self.assertRaisesRegex(_UpperCamelCase , '''is not a valid model identifier''' ): snake_case_ = cached_file('''tiny-random-bert''' , _UpperCamelCase ) with self.assertRaisesRegex(_UpperCamelCase , '''is not a valid git identifier''' ): snake_case_ = cached_file(_UpperCamelCase , _UpperCamelCase , revision='''aaaa''' ) with self.assertRaisesRegex(_UpperCamelCase , '''does not appear to have a file named''' ): snake_case_ = cached_file(_UpperCamelCase , '''conf''' ) def snake_case__( self : Optional[int] ) ->int: with self.assertRaisesRegex(_UpperCamelCase , '''does not appear to have a file named''' ): snake_case_ = cached_file(_UpperCamelCase , '''conf''' ) with open(os.path.join(_UpperCamelCase , '''refs''' , '''main''' ) ) as f: snake_case_ = f.read() self.assertTrue(os.path.isfile(os.path.join(_UpperCamelCase , '''.no_exist''' , _UpperCamelCase , '''conf''' ) ) ) snake_case_ = cached_file(_UpperCamelCase , '''conf''' , _raise_exceptions_for_missing_entries=_UpperCamelCase ) self.assertIsNone(_UpperCamelCase ) snake_case_ = cached_file(_UpperCamelCase , '''conf''' , local_files_only=_UpperCamelCase , _raise_exceptions_for_missing_entries=_UpperCamelCase ) self.assertIsNone(_UpperCamelCase ) snake_case_ = mock.Mock() snake_case_ = 5_0_0 snake_case_ = {} snake_case_ = HTTPError snake_case_ = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. 
with mock.patch('''requests.Session.request''' , return_value=_UpperCamelCase ) as mock_head: snake_case_ = cached_file(_UpperCamelCase , '''conf''' , _raise_exceptions_for_connection_errors=_UpperCamelCase ) self.assertIsNone(_UpperCamelCase ) # This check we did call the fake head request mock_head.assert_called() def snake_case__( self : Dict ) ->Optional[int]: self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCamelCase ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCamelCase ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCamelCase ) ) def snake_case__( self : Optional[int] ) ->str: # `get_file_from_repo` returns None if the file does not exist self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) ) # The function raises if the repository does not exist. with self.assertRaisesRegex(_UpperCamelCase , '''is not a valid model identifier''' ): get_file_from_repo('''bert-base-case''' , _UpperCamelCase ) # The function raises if the revision does not exist. with self.assertRaisesRegex(_UpperCamelCase , '''is not a valid git identifier''' ): get_file_from_repo('''bert-base-cased''' , _UpperCamelCase , revision='''ahaha''' ) snake_case_ = get_file_from_repo('''bert-base-cased''' , _UpperCamelCase ) # The name is the cached name which is not very easy to test, so instead we load the content. snake_case_ = json.loads(open(_UpperCamelCase , '''r''' ).read() ) self.assertEqual(config['''hidden_size'''] , 7_6_8 ) def snake_case__( self : Optional[Any] ) ->Any: with tempfile.TemporaryDirectory() as tmp_dir: snake_case_ = Path(_UpperCamelCase ) / '''a.txt''' filename.touch() self.assertEqual(get_file_from_repo(_UpperCamelCase , '''a.txt''' ) , str(_UpperCamelCase ) ) self.assertIsNone(get_file_from_repo(_UpperCamelCase , '''b.txt''' ) )
8
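# Usage sketch of the hub helpers exercised above; the kwargs are the ones the
# tests pass, and the first call needs network access to download the file.
from transformers.utils import cached_file, has_file

resolved = cached_file("hf-internal-testing/tiny-random-bert", "config.json")
missing = cached_file(
    "hf-internal-testing/tiny-random-bert",
    "no-such-file.txt",
    _raise_exceptions_for_missing_entries=False,  # return None instead of raising
)
print(resolved, missing, has_file("hf-internal-testing/tiny-random-bert", "config.json"))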
"""simple docstring""" from typing import Optional, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin class snake_case__ ( snake_case_, snake_case_ ): @register_to_config def __init__( self , lowerCamelCase = 768 , ): super().__init__() __a = nn.Parameter(torch.zeros(1 , lowerCamelCase ) ) __a = nn.Parameter(torch.ones(1 , lowerCamelCase ) ) def a__ ( self , lowerCamelCase = None , lowerCamelCase = None , ): __a = nn.Parameter(self.mean.to(lowerCamelCase ).to(lowerCamelCase ) ) __a = nn.Parameter(self.std.to(lowerCamelCase ).to(lowerCamelCase ) ) return self def a__ ( self , lowerCamelCase ): __a = (embeds - self.mean) * 1.0 / self.std return embeds def a__ ( self , lowerCamelCase ): __a = (embeds * self.std) + self.mean return embeds
261
0
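# Round-trip check of the normalizer above in plain torch: scale() computes
# (x - mean) / std and unscale() computes x * std + mean, so they invert.
import torch

mean, std = torch.zeros(1, 4), torch.full((1, 4), 2.0)
embeds = torch.randn(3, 4)
scaled = (embeds - mean) * 1.0 / std
restored = scaled * std + mean
assert torch.allclose(embeds, restored)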
import warnings from ...utils import is_sklearn_available, requires_backends if is_sklearn_available(): from scipy.stats import pearsonr, spearmanr from sklearn.metrics import f1_score, matthews_corrcoef DEPRECATION_WARNING = ( 'This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate ' 'library. You can have a look at this example script for pointers: ' 'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' ) def simple_accuracy( preds , labels ): warnings.warn(DEPRECATION_WARNING , FutureWarning ) requires_backends(simple_accuracy , '''sklearn''' ) return (preds == labels).mean() def acc_and_fa( preds , labels ): warnings.warn(DEPRECATION_WARNING , FutureWarning ) requires_backends(acc_and_fa , '''sklearn''' ) acc = simple_accuracy(preds , labels ) fa = f1_score(y_true=labels , y_pred=preds ) return { "acc": acc, "f1": fa, "acc_and_f1": (acc + fa) / 2, } def pearson_and_spearman( preds , labels ): warnings.warn(DEPRECATION_WARNING , FutureWarning ) requires_backends(pearson_and_spearman , '''sklearn''' ) pearson_corr = pearsonr(preds , labels )[0] spearman_corr = spearmanr(preds , labels )[0] return { "pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2, } def glue_compute_metrics( task_name , preds , labels ): warnings.warn(DEPRECATION_WARNING , FutureWarning ) requires_backends(glue_compute_metrics , '''sklearn''' ) assert len(preds ) == len(labels ), F'''Predictions and labels have mismatched lengths {len(preds )} and {len(labels )}''' if task_name == "cola": return {"mcc": matthews_corrcoef(labels , preds )} elif task_name == "sst-2": return {"acc": simple_accuracy(preds , labels )} elif task_name == "mrpc": return acc_and_fa(preds , labels ) elif task_name == "sts-b": return pearson_and_spearman(preds , labels ) elif task_name == "qqp": return acc_and_fa(preds , labels ) elif task_name == "mnli": return {"mnli/acc": simple_accuracy(preds , labels )} elif task_name == "mnli-mm": return {"mnli-mm/acc": simple_accuracy(preds , labels )} elif task_name == "qnli": return {"acc": simple_accuracy(preds , labels )} elif task_name == "rte": return {"acc": simple_accuracy(preds , labels )} elif task_name == "wnli": return {"acc": simple_accuracy(preds , labels )} elif task_name == "hans": return {"acc": simple_accuracy(preds , labels )} else: raise KeyError(task_name ) def xnli_compute_metrics( task_name , preds , labels ): warnings.warn(DEPRECATION_WARNING , FutureWarning ) requires_backends(xnli_compute_metrics , '''sklearn''' ) if len(preds ) != len(labels ): raise ValueError(F'''Predictions and labels have mismatched lengths {len(preds )} and {len(labels )}''' ) if task_name == "xnli": return {"acc": simple_accuracy(preds , labels )} else: raise KeyError(task_name )
9
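# Toy run of the acc/F1 aggregation above (sklearn backend), independent of the
# deprecated wrapper functions.
import numpy as np
from sklearn.metrics import f1_score

preds, labels = np.array([1, 0, 1, 1]), np.array([1, 0, 0, 1])
acc = (preds == labels).mean()
f1 = f1_score(y_true=labels, y_pred=preds)
print({"acc": acc, "f1": f1, "acc_and_f1": (acc + f1) / 2})  # acc=0.75, f1=0.8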
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available SCREAMING_SNAKE_CASE__:List[str] = { """configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__:Dict = [ """LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""", """LongT5EncoderModel""", """LongT5ForConditionalGeneration""", """LongT5Model""", """LongT5PreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__:Dict = [ """FlaxLongT5ForConditionalGeneration""", """FlaxLongT5Model""", """FlaxLongT5PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longta import ( LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST, LongTaEncoderModel, LongTaForConditionalGeneration, LongTaModel, LongTaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_longta import ( FlaxLongTaForConditionalGeneration, FlaxLongTaModel, FlaxLongTaPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__:Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
261
0
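# A dependency-free sketch of the same lazy-import idea using a module-level
# __getattr__ (PEP 562): nothing is imported until an attribute is first used.
import importlib

_import_structure = {"math": ["sqrt"]}  # toy mapping: module name -> exported names

def __getattr__(name):
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)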
from __future__ import annotations import typing from collections.abc import Iterable import numpy as np Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007 VectorOut = typing.Union[np.float64, int, float] # noqa: UP007 def euclidean_distance( vector_1: Vector , vector_2: Vector ) -> VectorOut: """simple docstring""" return np.sqrt(np.sum((np.asarray(vector_1 ) - np.asarray(vector_2 )) ** 2 ) ) def euclidean_distance_no_np( vector_1: Vector , vector_2: Vector ) -> VectorOut: """simple docstring""" return sum((va - vb) ** 2 for va, vb in zip(vector_1 , vector_2 ) ) ** (1 / 2) if __name__ == "__main__": def benchmark() -> None: """simple docstring""" from timeit import timeit print("Without Numpy" ) print( timeit( "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])" , number=10000 , globals=globals() , ) ) print("With Numpy" ) print( timeit( "euclidean_distance([1, 2, 3], [4, 5, 6])" , number=10000 , globals=globals() , ) ) benchmark()
10
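# Quick agreement check between the two implementations above: both compute
# sqrt(27) for this input.
import numpy as np

a, b = [1, 2, 3], [4, 5, 6]
with_np = np.sqrt(np.sum((np.asarray(a) - np.asarray(b)) ** 2))
without_np = sum((va - vb) ** 2 for va, vb in zip(a, b)) ** (1 / 2)
assert abs(float(with_np) - without_np) < 1e-12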
"""simple docstring""" import argparse import collections import torch from flax import traverse_util from tax import checkpoints from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def _lowerCamelCase( a , a , a , a="attention" ): __a = params[F"{prefix}/layers_{i}/{layer_name}/key/kernel"] __a = params[F"{prefix}/layers_{i}/{layer_name}/out/kernel"] __a = params[F"{prefix}/layers_{i}/{layer_name}/query/kernel"] __a = params[F"{prefix}/layers_{i}/{layer_name}/value/kernel"] return k, o, q, v def _lowerCamelCase( a , a , a , a=False ): if split_mlp_wi: __a = params[F"{prefix}/layers_{i}/mlp/wi_0/kernel"] __a = params[F"{prefix}/layers_{i}/mlp/wi_1/kernel"] __a = (wi_a, wi_a) else: __a = params[F"{prefix}/layers_{i}/mlp/wi/kernel"] __a = params[F"{prefix}/layers_{i}/mlp/wo/kernel"] return wi, wo def _lowerCamelCase( a , a , a , a ): return params[F"{prefix}/layers_{i}/{layer_name}/scale"] def _lowerCamelCase( a , *, a , a ): __a = traverse_util.flatten_dict(variables["target"] ) __a = {"/".join(a ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi __a = "encoder/layers_0/mlp/wi_0/kernel" in old print("Split MLP:" , a ) __a = collections.OrderedDict() # Shared embeddings. __a = old["token_embedder/embedding"] # Encoder. for i in range(a ): # Block i, layer 0 (Self Attention). __a = tax_layer_norm_lookup(a , a , "encoder" , "pre_attention_layer_norm" ) __a , __a , __a , __a = tax_attention_lookup(a , a , "encoder" , "attention" ) __a = layer_norm __a = k.T __a = o.T __a = q.T __a = v.T # Block i, layer 1 (MLP). __a = tax_layer_norm_lookup(a , a , "encoder" , "pre_mlp_layer_norm" ) __a , __a = tax_mlp_lookup(a , a , "encoder" , a ) __a = layer_norm if split_mlp_wi: __a = wi[0].T __a = wi[1].T else: __a = wi.T __a = wo.T __a = old[ "encoder/relpos_bias/rel_embedding" ].T __a = old["encoder/encoder_norm/scale"] if not is_encoder_only: # Decoder. for i in range(a ): # Block i, layer 0 (Self Attention). __a = tax_layer_norm_lookup(a , a , "decoder" , "pre_self_attention_layer_norm" ) __a , __a , __a , __a = tax_attention_lookup(a , a , "decoder" , "self_attention" ) __a = layer_norm __a = k.T __a = o.T __a = q.T __a = v.T # Block i, layer 1 (Cross Attention). __a = tax_layer_norm_lookup(a , a , "decoder" , "pre_cross_attention_layer_norm" ) __a , __a , __a , __a = tax_attention_lookup(a , a , "decoder" , "encoder_decoder_attention" ) __a = layer_norm __a = k.T __a = o.T __a = q.T __a = v.T # Block i, layer 2 (MLP). __a = tax_layer_norm_lookup(a , a , "decoder" , "pre_mlp_layer_norm" ) __a , __a = tax_mlp_lookup(a , a , "decoder" , a ) __a = layer_norm if split_mlp_wi: __a = wi[0].T __a = wi[1].T else: __a = wi.T __a = wo.T __a = old["decoder/decoder_norm/scale"] __a = old[ "decoder/relpos_bias/rel_embedding" ].T # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: __a = old["decoder/logits_dense/kernel"].T return new def _lowerCamelCase( a , a ): __a = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: __a = state_dict["shared.weight"] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: __a = state_dict["shared.weight"] if "lm_head.weight" not in state_dict: # For old 1.0 models. print("Using shared word embeddings as lm_head." 
) state_dict["lm_head.weight"] = state_dict["shared.weight"] return state_dict def load_tax_weights_in_ta( model , config , tax_checkpoint_path , is_encoder_only ): variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path ) converted = convert_tax_to_pytorch(variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only ) state_dict = make_state_dict(converted , is_encoder_only ) model.load_state_dict(state_dict , strict=True ) def convert_tax_checkpoint_to_pytorch( tax_checkpoint_path , config_file , pytorch_dump_path , is_encoder_only = False ): config = TaConfig.from_json_file(config_file ) print(F"Building PyTorch model from configuration: {config}" ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: model = TaEncoderModel(config ) else: model = TaForConditionalGeneration(config ) # Load weights from tf checkpoint load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only ) # Save pytorch-model print(F"Save PyTorch model to {pytorch_dump_path}" ) model.save_pretrained(pytorch_dump_path ) # Verify that we can load the checkpoint. model.from_pretrained(pytorch_dump_path ) print("Done" ) if __name__ == "__main__": parser = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""") # Required parameters parser.add_argument( """--tax_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False ) args = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only )
261
0
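# Why the conversion above transposes every dense kernel (`.T`): T5X/Flax
# stores kernels as (in_features, out_features) while torch.nn.Linear expects
# (out_features, in_features).
import numpy as np
import torch

flax_kernel = np.random.rand(512, 2048).astype(np.float32)  # (in, out)
pt_weight = torch.from_numpy(flax_kernel.T.copy())           # (out, in)
assert pt_weight.shape == (2048, 512)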
import math class Graph: def __init__( self , n=0 ): # a graph with Node 0,1,...,N-1 self.n = n self.w = [ [math.inf for j in range(0 , n )] for i in range(0 , n ) ] # adjacency matrix for weight self.dp = [ [math.inf for j in range(0 , n )] for i in range(0 , n ) ] # dp[i][j] stores minimum distance from i to j def add_edge( self , u , v , w ): self.dp[u][v] = w def floyd_warshall( self ): for k in range(0 , self.n ): for i in range(0 , self.n ): for j in range(0 , self.n ): self.dp[i][j] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] ) def show_min( self , u , v ): return self.dp[u][v] if __name__ == "__main__": graph = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 10) graph.add_edge(1, 3, 5) graph.add_edge(2, 3, 7) graph.add_edge(3, 0, 10) graph.add_edge(3, 1, 2) graph.add_edge(3, 2, 1) graph.add_edge(3, 4, 6) graph.add_edge(4, 1, 3) graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() graph.show_min(1, 4) graph.show_min(0, 3)
11
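# Tiny standalone check of the Floyd-Warshall recurrence used above:
# dp[i][j] = min(dp[i][j], dp[i][k] + dp[k][j]) over every intermediate k.
import math

INF = math.inf
dp = [[0, 9, INF], [INF, 0, 2], [3, INF, 0]]
for k in range(3):
    for i in range(3):
        for j in range(3):
            dp[i][j] = min(dp[i][j], dp[i][k] + dp[k][j])
assert dp[0][2] == 11  # 0 -> 1 (weight 9) -> 2 (weight 2)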
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ): _snake_case : str = StableUnCLIPImgaImgPipeline _snake_case : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS _snake_case : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS _snake_case : Optional[Any] = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess _snake_case : List[Any] = frozenset([] ) def a__ ( self ): __a = 32 __a = embedder_hidden_size # image encoding components __a = CLIPImageProcessor(crop_size=32 , size=32 ) torch.manual_seed(0 ) __a = CLIPVisionModelWithProjection( CLIPVisionConfig( hidden_size=lowerCamelCase , projection_dim=lowerCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) ) # regular denoising components torch.manual_seed(0 ) __a = StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase ) __a = DDPMScheduler(beta_schedule="squaredcos_cap_v2" ) torch.manual_seed(0 ) __a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) __a = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) __a = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCamelCase , layers_per_block=1 , upcast_attention=lowerCamelCase , use_linear_projection=lowerCamelCase , ) torch.manual_seed(0 ) __a = DDIMScheduler( beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=lowerCamelCase , steps_offset=1 , ) torch.manual_seed(0 ) __a = AutoencoderKL() __a = { # image encoding components "feature_extractor": feature_extractor, "image_encoder": image_encoder.eval(), # image noising components "image_normalizer": image_normalizer.eval(), "image_noising_scheduler": image_noising_scheduler, # regular denoising components "tokenizer": tokenizer, "text_encoder": text_encoder.eval(), "unet": unet.eval(), "scheduler": scheduler, "vae": vae.eval(), } return 
components def a__ ( self , lowerCamelCase , lowerCamelCase=0 , lowerCamelCase=True ): if str(lowerCamelCase ).startswith("mps" ): __a = torch.manual_seed(lowerCamelCase ) else: __a = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase ) __a = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase ) if pil_image: __a = input_image * 0.5 + 0.5 __a = input_image.clamp(0 , 1 ) __a = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() __a = DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0] return { "prompt": "An anime racoon running a marathon", "image": input_image, "generator": generator, "num_inference_steps": 2, "output_type": "np", } @skip_mps def a__ ( self ): __a = "cpu" # ensure determinism for the device-dependent torch.Generator __a = self.get_dummy_components() __a = StableUnCLIPImgaImgPipeline(**lowerCamelCase ) __a = sd_pipe.to(lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase ) __a = self.get_dummy_inputs(lowerCamelCase ) inputs.update({"image_embeds": None} ) __a = sd_pipe(**lowerCamelCase ).images __a = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __a = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def a__ ( self ): __a = torch_device in ["cpu", "mps"] self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase ) def a__ ( self ): __a = torch_device in ["cpu", "mps"] self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def a__ ( self ): self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase ) @slow @require_torch_gpu class snake_case__ ( unittest.TestCase ): def a__ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self ): __a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) __a = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" ) __a = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __a = torch.Generator(device="cpu" ).manual_seed(0 ) __a = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" ) __a = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase ) def a__ ( self ): __a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) __a = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" ) __a = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) # stable unclip will oom when integration tests are run on a 
V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __a = torch.Generator(device="cpu" ).manual_seed(0 ) __a = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" ) __a = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase ) def a__ ( self ): __a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __a = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa ) __a = pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __a = pipe( lowerCamelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , ) __a = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
261
0
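# The memory-saving combination the tests above rely on to fit the pipeline on
# a V100; a sketch only (downloads model weights, requires diffusers + CUDA).
import torch
from diffusers import StableUnCLIPImg2ImgPipeline

pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
    "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
)
pipe.enable_attention_slicing()       # lower peak attention memory at some speed cost
pipe.enable_sequential_cpu_offload()  # keep weights on CPU, stream to GPU per module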
import importlib import inspect import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py PATH_TO_TRANSFORMERS = 'src/transformers' # This is to make sure the transformers module imported is the one in the repo. spec = importlib.util.spec_from_file_location( 'transformers', os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'), submodule_search_locations=[PATH_TO_TRANSFORMERS], ) transformers = spec.loader.load_module() CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` _re_checkpoint = re.compile(r'\[(.+?)\]\((https://huggingface\.co/.+?)\)') CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = { 'CLIPConfigMixin', 'DecisionTransformerConfigMixin', 'EncoderDecoderConfigMixin', 'RagConfigMixin', 'SpeechEncoderDecoderConfigMixin', 'VisionEncoderDecoderConfigMixin', 'VisionTextDualEncoderConfigMixin', } def check_config_docstrings_have_checkpoints(): '''simple docstring''' configs_without_checkpoint = [] for config_class in list(CONFIG_MAPPING.values() ): checkpoint_found = False # source code of `config_class` config_source = inspect.getsource(config_class ) checkpoints = _re_checkpoint.findall(config_source ) for checkpoint in checkpoints: # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` ckpt_name, ckpt_link = checkpoint # verify the checkpoint name corresponds to the checkpoint link ckpt_link_from_name = f'https://huggingface.co/{ckpt_name}' if ckpt_link == ckpt_link_from_name: checkpoint_found = True break name = config_class.__name__ if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(name ) if len(configs_without_checkpoint ) > 0: message = """\n""".join(sorted(configs_without_checkpoint ) ) raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}' ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
12
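# The core of the docstring check above on a toy docstring: extract
# (name, link) pairs and verify link == https://huggingface.co/<name>.
import re

_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
doc = "Example: [bert-base-uncased](https://huggingface.co/bert-base-uncased)"
for ckpt_name, ckpt_link in _re_checkpoint.findall(doc):
    assert ckpt_link == f"https://huggingface.co/{ckpt_name}"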
"""simple docstring""" import random def _lowerCamelCase( a , a , a ): __a = a[left_index] __a = left_index + 1 for j in range(left_index + 1 , a ): if a[j] < pivot: __a , __a = a[i], a[j] i += 1 __a , __a = a[i - 1], a[left_index] return i - 1 def _lowerCamelCase( a , a , a ): if left < right: __a = random.randint(a , right - 1 ) __a , __a = ( a[left], a[pivot], ) # switches the pivot with the left most bound __a = partition(a , a , a ) quick_sort_random( a , a , a ) # recursive quicksort to the left of the pivot point quick_sort_random( a , pivot_index + 1 , a ) # recursive quicksort to the right of the pivot point def _lowerCamelCase( ): __a = input("Enter numbers separated by a comma:\n" ).strip() __a = [int(a ) for item in user_input.split("," )] quick_sort_random(a , 0 , len(a ) ) print(a ) if __name__ == "__main__": main()
261
0
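# Non-interactive usage check for the randomized quicksort above (assumes
# partition/quick_sort_random from that snippet are in scope).
import random

random.seed(0)  # only affects pivot choices, never the sorted result
data = [5, 3, 8, 1, 9, 2, 2]
quick_sort_random(data, 0, len(data))
assert data == [1, 2, 2, 3, 5, 8, 9]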
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.activations import gelu_new, gelu_python, get_activation @require_torch class __lowercase ( unittest.TestCase ): """simple docstring""" def _SCREAMING_SNAKE_CASE ( self : Dict): SCREAMING_SNAKE_CASE_: List[Any] = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100]) SCREAMING_SNAKE_CASE_: Dict = get_activation("gelu") self.assertTrue(torch.allclose(gelu_python(lowerCAmelCase__) , torch_builtin(lowerCAmelCase__))) self.assertFalse(torch.allclose(gelu_python(lowerCAmelCase__) , gelu_new(lowerCAmelCase__))) def _SCREAMING_SNAKE_CASE ( self : Any): SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100]) SCREAMING_SNAKE_CASE_: Optional[int] = get_activation("gelu") SCREAMING_SNAKE_CASE_: List[str] = get_activation("gelu_10") SCREAMING_SNAKE_CASE_: Union[str, Any] = torch_builtin(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: str = geluaa(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: str = torch.where(y_gelu_aa < 10.0 , 1 , 0) self.assertTrue(torch.max(lowerCAmelCase__).item() == 10.0) self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask)) def _SCREAMING_SNAKE_CASE ( self : Tuple): get_activation("gelu") get_activation("gelu_10") get_activation("gelu_fast") get_activation("gelu_new") get_activation("gelu_python") get_activation("gelu_pytorch_tanh") get_activation("linear") get_activation("mish") get_activation("quick_gelu") get_activation("relu") get_activation("sigmoid") get_activation("silu") get_activation("swish") get_activation("tanh") with self.assertRaises(lowerCAmelCase__): get_activation("bogus") with self.assertRaises(lowerCAmelCase__): get_activation(lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): SCREAMING_SNAKE_CASE_: List[str] = get_activation("gelu") SCREAMING_SNAKE_CASE_: Optional[int] = 1 SCREAMING_SNAKE_CASE_: Union[str, Any] = get_activation("gelu") self.assertEqual(acta.a , 1) with self.assertRaises(lowerCAmelCase__): SCREAMING_SNAKE_CASE_: Optional[int] = acta.a
13
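# What the `gelu_10` assertions above are checking: the clipped variant tracks
# GELU but saturates at 10 (requires transformers).
import torch
from transformers.activations import get_activation

x = torch.tensor([-100.0, 0.0, 5.0, 100.0])
print(get_activation("gelu")(x))     # unbounded above: roughly [0, 0, 5, 100]
print(get_activation("gelu_10")(x))  # same values, capped at 10.0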
"""simple docstring""" from operator import delitem, getitem, setitem import pytest from data_structures.hashing.hash_map import HashMap def _lowerCamelCase( a ): return getitem, k def _lowerCamelCase( a , a ): return setitem, k, v def _lowerCamelCase( a ): return delitem, k def _lowerCamelCase( a , a , *a ): try: return fun(a , *a ), None except Exception as e: return None, e SCREAMING_SNAKE_CASE__:List[Any] = ( _set("""key_a""", """val_a"""), _set("""key_b""", """val_b"""), ) SCREAMING_SNAKE_CASE__:List[Any] = [ _set("""key_a""", """val_a"""), _set("""key_a""", """val_b"""), ] SCREAMING_SNAKE_CASE__:List[Any] = [ _set("""key_a""", """val_a"""), _set("""key_b""", """val_b"""), _del("""key_a"""), _del("""key_b"""), _set("""key_a""", """val_a"""), _del("""key_a"""), ] SCREAMING_SNAKE_CASE__:Any = [ _get("""key_a"""), _del("""key_a"""), _set("""key_a""", """val_a"""), _del("""key_a"""), _del("""key_a"""), _get("""key_a"""), ] SCREAMING_SNAKE_CASE__:int = [ *[_set(x, x) for x in range(5)], # guaranteed upsize ] SCREAMING_SNAKE_CASE__:Any = [ *[_set(x, x) for x in range(5)], # guaranteed upsize *[_del(x) for x in range(5)], _set("""key_a""", """val_b"""), ] @pytest.mark.parametrize( "operations" , ( pytest.param(_add_items , id="add items" ), pytest.param(_overwrite_items , id="overwrite items" ), pytest.param(_delete_items , id="delete items" ), pytest.param(_access_absent_items , id="access absent items" ), pytest.param(_add_with_resize_up , id="add with resize up" ), pytest.param(_add_with_resize_down , id="add with resize down" ), ) , ) def _lowerCamelCase( a ): __a = HashMap(initial_block_size=4 ) __a = {} for _, (fun, *args) in enumerate(a ): __a , __a = _run_operation(a , a , *a ) __a , __a = _run_operation(a , a , *a ) assert my_res == py_res assert str(a ) == str(a ) assert set(a ) == set(a ) assert len(a ) == len(a ) assert set(my.items() ) == set(py.items() ) def _lowerCamelCase( ): def is_public(a ) -> bool: return not name.startswith("_" ) __a = {name for name in dir({} ) if is_public(a )} __a = {name for name in dir(HashMap() ) if is_public(a )} assert dict_public_names > hash_public_names
261
0
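# Minimal sketch of the differential-testing pattern above: drive the custom
# structure and a trusted reference with identical operation tuples.
from operator import getitem, setitem

ops = [(setitem, "k", 1), (getitem, "k")]
mine, ref = {}, {}  # stand-ins for HashMap() and dict()
for fun, *args in ops:
    assert fun(mine, *args) == fun(ref, *args)
assert mine == ref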
import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : Dict , UpperCAmelCase__ : Optional[int]=0.01 , UpperCAmelCase__ : Optional[Any]=1_000) ->Optional[int]: '''simple docstring''' A__ = p_stop A__ = max_length def __iter__( self : List[Any]) ->List[Any]: '''simple docstring''' A__ = 0 A__ = False while not stop and count < self.max_length: yield count count += 1 A__ = random.random() < self.p_stop class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str]=False , UpperCAmelCase__ : Any=True) ->Optional[Any]: '''simple docstring''' A__ = [ BatchSamplerShard(UpperCAmelCase__ , 2 , UpperCAmelCase__ , split_batches=UpperCAmelCase__ , even_batches=UpperCAmelCase__) for i in range(2) ] A__ = [list(UpperCAmelCase__) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(UpperCAmelCase__) for shard in batch_sampler_shards] , [len(UpperCAmelCase__) for e in expected]) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Dict) ->Any: '''simple docstring''' A__ = BatchSampler(range(24) , batch_size=3 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__) A__ = BatchSampler(range(24) , batch_size=3 , drop_last=UpperCAmelCase__) # Expected shouldn't change self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__) # Check the shards when the dataset is a round multiple of batch size but not total batch size. A__ = BatchSampler(range(21) , batch_size=3 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__) A__ = BatchSampler(range(21) , batch_size=3 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. A__ = BatchSampler(range(22) , batch_size=3 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__) A__ = BatchSampler(range(22) , batch_size=3 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. 
A__ = BatchSampler(range(20) , batch_size=3 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__) A__ = BatchSampler(range(20) , batch_size=3 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__) # Check the shards when the dataset is very small. A__ = BatchSampler(range(2) , batch_size=3 , drop_last=UpperCAmelCase__) A__ = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__) A__ = BatchSampler(range(2) , batch_size=3 , drop_last=UpperCAmelCase__) A__ = [[], []] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[Any]: '''simple docstring''' A__ = BatchSampler(range(24) , batch_size=4 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__) A__ = BatchSampler(range(24) , batch_size=4 , drop_last=UpperCAmelCase__) # Expected shouldn't change self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__) # Check the shards when the dataset is not a round multiple of batch size. A__ = BatchSampler(range(22) , batch_size=4 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__) A__ = BatchSampler(range(22) , batch_size=4 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__) # Check the shards when the dataset is not a round multiple of batch size or num_processes. A__ = BatchSampler(range(21) , batch_size=4 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__) A__ = BatchSampler(range(21) , batch_size=4 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__) # Check the shards when the dataset is very small. 
A__ = BatchSampler(range(2) , batch_size=4 , drop_last=UpperCAmelCase__) A__ = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__) A__ = BatchSampler(range(2) , batch_size=4 , drop_last=UpperCAmelCase__) A__ = [[], []] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]: '''simple docstring''' A__ = BatchSampler(range(24) , batch_size=3 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__) A__ = BatchSampler(range(24) , batch_size=3 , drop_last=UpperCAmelCase__) # Expected shouldn't change self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__) # Check the shards when the dataset is a round multiple of batch size but not total batch size. A__ = BatchSampler(range(21) , batch_size=3 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__) A__ = BatchSampler(range(21) , batch_size=3 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. A__ = BatchSampler(range(22) , batch_size=3 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__) A__ = BatchSampler(range(22) , batch_size=3 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. A__ = BatchSampler(range(20) , batch_size=3 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__) A__ = BatchSampler(range(20) , batch_size=3 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__) # Check the shards when the dataset is very small. 
A__ = BatchSampler(range(2) , batch_size=3 , drop_last=UpperCAmelCase__) A__ = [[[0, 1]], []] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__) A__ = BatchSampler(range(2) , batch_size=3 , drop_last=UpperCAmelCase__) A__ = [[], []] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Tuple: '''simple docstring''' A__ = BatchSampler(range(24) , batch_size=4 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ , even_batches=UpperCAmelCase__) A__ = BatchSampler(range(24) , batch_size=4 , drop_last=UpperCAmelCase__) # Expected shouldn't change self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ , even_batches=UpperCAmelCase__) # Check the shards when the dataset is not a round multiple of batch size. A__ = BatchSampler(range(22) , batch_size=4 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ , even_batches=UpperCAmelCase__) A__ = BatchSampler(range(22) , batch_size=4 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ , even_batches=UpperCAmelCase__) # Check the shards when the dataset is not a round multiple of batch size or num_processes. A__ = BatchSampler(range(21) , batch_size=4 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ , even_batches=UpperCAmelCase__) A__ = BatchSampler(range(21) , batch_size=4 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ , even_batches=UpperCAmelCase__) # Check the shards when the dataset is very small. 
A__ = BatchSampler(range(2) , batch_size=4 , drop_last=UpperCAmelCase__) A__ = [[[0, 1]], []] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ , even_batches=UpperCAmelCase__) A__ = BatchSampler(range(2) , batch_size=4 , drop_last=UpperCAmelCase__) A__ = [[], []] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ , even_batches=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any: '''simple docstring''' A__ = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]] A__ = [BatchSamplerShard(UpperCAmelCase__ , 2 , UpperCAmelCase__ , even_batches=UpperCAmelCase__) for i in range(2)] self.assertEqual(len(batch_sampler_shards[0]) , 3) self.assertEqual(len(batch_sampler_shards[1]) , 2) self.assertListEqual(list(batch_sampler_shards[0]) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]]) self.assertListEqual(list(batch_sampler_shards[1]) , [[3, 4], [9, 10, 11]]) def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple=False , UpperCAmelCase__ : Optional[Any]=2 , UpperCAmelCase__ : List[str]=False) ->Optional[Any]: '''simple docstring''' random.seed(UpperCAmelCase__) A__ = list(UpperCAmelCase__) A__ = [ IterableDatasetShard( UpperCAmelCase__ , batch_size=UpperCAmelCase__ , drop_last=UpperCAmelCase__ , num_processes=UpperCAmelCase__ , process_index=UpperCAmelCase__ , split_batches=UpperCAmelCase__ , ) for i in range(UpperCAmelCase__) ] A__ = [] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. random.seed(UpperCAmelCase__) iterable_dataset_lists.append(list(UpperCAmelCase__)) A__ = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size A__ = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: self.assertEqual(len(UpperCAmelCase__) , len(UpperCAmelCase__)) self.assertTrue(len(UpperCAmelCase__) % shard_batch_size == 0) A__ = [] for idx in range(0 , len(UpperCAmelCase__) , UpperCAmelCase__): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(UpperCAmelCase__) < len(UpperCAmelCase__): reference += reference self.assertListEqual(UpperCAmelCase__ , reference[: len(UpperCAmelCase__)]) def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]: '''simple docstring''' A__ = 42 A__ = RandomIterableDataset() self.check_iterable_dataset_shards(UpperCAmelCase__ , UpperCAmelCase__ , batch_size=4 , drop_last=UpperCAmelCase__ , split_batches=UpperCAmelCase__) self.check_iterable_dataset_shards(UpperCAmelCase__ , UpperCAmelCase__ , batch_size=4 , drop_last=UpperCAmelCase__ , split_batches=UpperCAmelCase__) self.check_iterable_dataset_shards(UpperCAmelCase__ , UpperCAmelCase__ , batch_size=4 , drop_last=UpperCAmelCase__ , split_batches=UpperCAmelCase__) self.check_iterable_dataset_shards(UpperCAmelCase__ , UpperCAmelCase__ , batch_size=4 , drop_last=UpperCAmelCase__ , split_batches=UpperCAmelCase__) # Edge case with a very small dataset A__ = RandomIterableDataset(max_length=2) self.check_iterable_dataset_shards(UpperCAmelCase__ , UpperCAmelCase__ , batch_size=4 , drop_last=UpperCAmelCase__ , split_batches=UpperCAmelCase__) self.check_iterable_dataset_shards(UpperCAmelCase__ , UpperCAmelCase__ , batch_size=4 , drop_last=UpperCAmelCase__ , 
split_batches=UpperCAmelCase__) self.check_iterable_dataset_shards(UpperCAmelCase__ , UpperCAmelCase__ , batch_size=4 , drop_last=UpperCAmelCase__ , split_batches=UpperCAmelCase__) self.check_iterable_dataset_shards(UpperCAmelCase__ , UpperCAmelCase__ , batch_size=4 , drop_last=UpperCAmelCase__ , split_batches=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[int]: '''simple docstring''' A__ = BatchSampler(range(16) , batch_size=4 , drop_last=UpperCAmelCase__) A__ = SkipBatchSampler(UpperCAmelCase__ , 2) self.assertListEqual(list(UpperCAmelCase__) , [[8, 9, 10, 11], [12, 13, 14, 15]]) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]: '''simple docstring''' A__ = SkipDataLoader(list(range(16)) , batch_size=4 , skip_batches=2) self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]]) def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[Any]: '''simple docstring''' A__ = DataLoader(list(range(16)) , batch_size=4) A__ = skip_first_batches(UpperCAmelCase__ , num_batches=2) self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]]) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]: '''simple docstring''' A__ = DataLoaderShard(list(range(16)) , batch_size=4) for idx, _ in enumerate(UpperCAmelCase__): self.assertEqual(dataloader.end_of_dataloader , idx == 3) # Test it also works on the second iteration for idx, _ in enumerate(UpperCAmelCase__): self.assertEqual(dataloader.end_of_dataloader , idx == 3) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Dict: '''simple docstring''' Accelerator() A__ = DataLoaderDispatcher(range(16) , batch_size=4) for idx, _ in enumerate(UpperCAmelCase__): self.assertEqual(dataloader.end_of_dataloader , idx == 3) # Test it also works on the second iteration for idx, _ in enumerate(UpperCAmelCase__): self.assertEqual(dataloader.end_of_dataloader , idx == 3)
14
"""simple docstring""" import copy import re class snake_case__ : _snake_case : Dict = """hp""" _snake_case : List[str] = {} _snake_case : int = None @classmethod def a__ ( cls , lowerCamelCase , lowerCamelCase ): __a = prefix __a = defaults cls.build_naming_info() @staticmethod def a__ ( lowerCamelCase , lowerCamelCase ): if len(lowerCamelCase ) == 0: return "" __a = None if any(char.isdigit() for char in word ): raise Exception(F"Parameters should not contain numbers: '{word}' contains a number" ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 , len(lowerCamelCase ) + 1 ): __a = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: __a = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(lowerCamelCase ): __a = "" while integer != 0: __a = chr(ord("A" ) + integer % 10 ) + s integer //= 10 return s __a = 0 while True: __a = word + "#" + int_to_alphabetic(lowerCamelCase ) if sword in info["reverse_short_word"]: continue else: __a = sword break __a = short_word __a = word return short_word @staticmethod def a__ ( lowerCamelCase , lowerCamelCase ): __a = param_name.split("_" ) __a = [TrialShortNamer.shortname_for_word(lowerCamelCase , lowerCamelCase ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name __a = ["", "_"] for separator in separators: __a = separator.join(lowerCamelCase ) if shortname not in info["reverse_short_param"]: __a = shortname __a = param_name return shortname return param_name @staticmethod def a__ ( lowerCamelCase , lowerCamelCase ): __a = TrialShortNamer.shortname_for_key(lowerCamelCase , lowerCamelCase ) __a = short_name __a = param_name @classmethod def a__ ( cls ): if cls.NAMING_INFO is not None: return __a = { "short_word": {}, "reverse_short_word": {}, "short_param": {}, "reverse_short_param": {}, } __a = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(lowerCamelCase , lowerCamelCase ) __a = info @classmethod def a__ ( cls , lowerCamelCase ): cls.build_naming_info() assert cls.PREFIX is not None __a = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(F"You should provide a default value for the param name {k} with value {v}" ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue __a = cls.NAMING_INFO["short_param"][k] if isinstance(lowerCamelCase , lowerCamelCase ): __a = 1 if v else 0 __a = "" if isinstance(lowerCamelCase , (int, float) ) else "-" __a = F"{key}{sep}{v}" name.append(lowerCamelCase ) return "_".join(lowerCamelCase ) @classmethod def a__ ( cls , lowerCamelCase ): __a = repr[len(cls.PREFIX ) + 1 :] if repr == "": __a = [] else: __a = repr.split("_" ) __a = {} for value in values: if "-" in value: __a , __a = value.split("-" ) else: __a = re.sub("[0-9.]" , "" , lowerCamelCase ) __a = float(re.sub("[^0-9.]" , "" , lowerCamelCase ) ) __a = cls.NAMING_INFO["reverse_short_param"][p_k] __a = p_v for k in cls.DEFAULTS: if k not in parameters: __a = cls.DEFAULTS[k] return parameters
261
0
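The dataloader tests in the row above center on skipping the first N batches of a loader (SkipBatchSampler, SkipDataLoader, skip_first_batches). Below is a minimal generator-based sketch of that idea; accelerate's real skip_first_batches is more involved, and the helper name is reused here purely for illustration.

from itertools import islice

def skip_first_batches(batches, num_batches):
    # Lazily drop the first `num_batches` items from any iterable of batches.
    return islice(iter(batches), num_batches, None)

assert list(skip_first_batches([[0, 1], [2, 3], [4, 5]], 1)) == [[2, 3], [4, 5]]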
from typing import Optional import pyspark from .. import Features, NamedSplit from ..download import DownloadMode from ..packaged_modules.spark.spark import Spark from .abc import AbstractDatasetReader class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : List[Any] ,A : pyspark.sql.DataFrame ,A : Optional[NamedSplit] = None ,A : Optional[Features] = None ,A : bool = True ,A : str = None ,A : bool = False ,A : str = None ,A : bool = True ,A : str = "arrow" ,**A : List[Any] ,): super().__init__( split=A ,features=A ,cache_dir=A ,keep_in_memory=A ,streaming=A ,**A ,) __A = load_from_cache_file __A = file_format __A = Spark( df=A ,features=A ,cache_dir=A ,working_dir=A ,**A ,) def UpperCamelCase_ ( self : Optional[Any] ): if self.streaming: return self.builder.as_streaming_dataset(split=self.split ) __A = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD self.builder.download_and_prepare( download_mode=A ,file_format=self._file_format ,) return self.builder.as_dataset(split=self.split )
15
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING SCREAMING_SNAKE_CASE__:int = logging.get_logger(__name__) class snake_case__ ( snake_case_ ): _snake_case : Optional[int] = """upernet""" def __init__( self , lowerCamelCase=None , lowerCamelCase=512 , lowerCamelCase=0.02 , lowerCamelCase=[1, 2, 3, 6] , lowerCamelCase=True , lowerCamelCase=0.4 , lowerCamelCase=384 , lowerCamelCase=256 , lowerCamelCase=1 , lowerCamelCase=False , lowerCamelCase=255 , **lowerCamelCase , ): super().__init__(**lowerCamelCase ) if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." ) __a = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] ) elif isinstance(lowerCamelCase , lowerCamelCase ): __a = backbone_config.get("model_type" ) __a = CONFIG_MAPPING[backbone_model_type] __a = config_class.from_dict(lowerCamelCase ) __a = backbone_config __a = hidden_size __a = initializer_range __a = pool_scales __a = use_auxiliary_head __a = auxiliary_loss_weight __a = auxiliary_in_channels __a = auxiliary_channels __a = auxiliary_num_convs __a = auxiliary_concat_input __a = loss_ignore_index def a__ ( self ): __a = copy.deepcopy(self.__dict__ ) __a = self.backbone_config.to_dict() __a = self.__class__.model_type return output
261
0
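The reader class in the row above wraps a Spark DataFrame for the datasets library. A hedged usage sketch follows; `SparkDatasetReader` is an assumed de-mangled name for the class in the dump, and running this requires a working pyspark installation (Java included).

from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[*]").appName("demo").getOrCreate()
df = spark.createDataFrame([("hello",), ("world",)], ["text"])
# reader = SparkDatasetReader(df, keep_in_memory=True)  # hypothetical name for the mangled class
# dataset = reader.read()                               # would yield a datasets.Dataset
print(df.count())  # 2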
"""simple docstring""" from __future__ import annotations import csv import requests from bsa import BeautifulSoup def __UpperCAmelCase ( __lowerCamelCase = "" ) -> dict[str, float]: lowercase__ : Optional[int] = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250''' lowercase__ : Tuple = BeautifulSoup(requests.get(__lowerCamelCase ).text , '''html.parser''' ) lowercase__ : Dict = soup.find_all('''td''' , attrs='''titleColumn''' ) lowercase__ : List[str] = soup.find_all('''td''' , class_='''ratingColumn imdbRating''' ) return { title.a.text: float(rating.strong.text ) for title, rating in zip(__lowerCamelCase , __lowerCamelCase ) } def __UpperCAmelCase ( __lowerCamelCase = "IMDb_Top_250_Movies.csv" ) -> None: lowercase__ : str = get_imdb_top_aaa_movies() with open(__lowerCamelCase , '''w''' , newline='''''' ) as out_file: lowercase__ : Dict = csv.writer(__lowerCamelCase ) writer.writerow(['''Movie title''', '''IMDb rating'''] ) for title, rating in movies.items(): writer.writerow([title, rating] ) if __name__ == "__main__": write_movies()
16
"""simple docstring""" def _lowerCamelCase( a = 1_0_0_0 ): __a = 3 __a = 0 while a < n: if a % 3 == 0 or a % 5 == 0: result += a elif a % 1_5 == 0: result -= a a += 1 return result if __name__ == "__main__": print(F'''{solution() = }''')
261
0
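The multiples-of-3-or-5 loop above can be cross-checked against the closed form for arithmetic series; the helper below is an illustrative aside, not part of the dump.

def series_sum(k: int, limit: int) -> int:
    # Sum of the multiples of k strictly below `limit`: k * (1 + 2 + ... + m).
    m = (limit - 1) // k
    return k * m * (m + 1) // 2

# Inclusion-exclusion: multiples of 3, plus multiples of 5, minus the double-counted 15s.
assert series_sum(3, 1000) + series_sum(5, 1000) - series_sum(15, 1000) == 233168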
"""simple docstring""" import os import sys import unittest _a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path _a = os.path.join(git_repo_path, 'src', 'transformers') _a = '\n{0} = None\n' _a = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n' _a = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n' class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def _lowercase ( self : Optional[int] ): __lowercase = find_backend(" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")" ) self.assertIsNone(UpperCAmelCase__ ) __lowercase = find_backend(" if not is_tokenizers_available():" ) self.assertEqual(UpperCAmelCase__, "tokenizers" ) __lowercase = find_backend(" if not is_tensorflow_text_available():" ) self.assertEqual(UpperCAmelCase__, "tensorflow_text" ) __lowercase = find_backend(" if not (is_sentencepiece_available() and is_tokenizers_available()):" ) self.assertEqual(UpperCAmelCase__, "sentencepiece_and_tokenizers" ) __lowercase = find_backend( " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" ) self.assertEqual(UpperCAmelCase__, "sentencepiece_and_tensorflow_text" ) __lowercase = find_backend( " if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):" ) self.assertEqual(UpperCAmelCase__, "sentencepiece_and_tokenizers_and_vision" ) def _lowercase ( self : Tuple ): __lowercase = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("torch", UpperCAmelCase__ ) self.assertIn("tensorflow_text", UpperCAmelCase__ ) self.assertIn("sentencepiece_and_tokenizers", UpperCAmelCase__ ) # Likewise, we can't assert on the exact content of a key self.assertIn("BertModel", objects["torch"] ) self.assertIn("TFBertModel", objects["tf"] ) self.assertIn("FlaxBertModel", objects["flax"] ) self.assertIn("BertModel", objects["torch"] ) self.assertIn("TFBertTokenizer", objects["tensorflow_text"] ) self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"] ) def _lowercase ( self : Optional[Any] ): __lowercase = create_dummy_object("CONSTANT", "'torch'" ) self.assertEqual(UpperCAmelCase__, "\nCONSTANT = None\n" ) __lowercase = create_dummy_object("function", "'torch'" ) self.assertEqual( UpperCAmelCase__, "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" ) __lowercase = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n" __lowercase = create_dummy_object("FakeClass", "'torch'" ) self.assertEqual(UpperCAmelCase__, UpperCAmelCase__ ) def _lowercase ( self : List[Any] ): __lowercase = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n" __lowercase = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} ) 
self.assertEqual(dummy_files["torch"], UpperCAmelCase__ )
17
"""simple docstring""" import operator def _lowerCamelCase( a , a = False , a = None ): __a = operator.lt if reverse else operator.gt __a = solution or [] if not arr: return solution __a = [arr.pop(0 )] for i, item in enumerate(a ): if _operator(a , sublist[-1] ): sublist.append(a ) arr.pop(a ) # merging sublist into solution list if not solution: solution.extend(a ) else: while sublist: __a = sublist.pop(0 ) for i, xx in enumerate(a ): if not _operator(a , a ): solution.insert(a , a ) break else: solution.append(a ) strand_sort(a , a , a ) return solution if __name__ == "__main__": assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5] assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
261
0
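A quick sanity check for the repaired strand_sort above (it assumes the function is importable or defined in the same module); the expected output was traced by hand, including the duplicate element.

data = [10, 2, 33, 4, 5, 5, 0]
# strand_sort mutates its input via pop(), hence the defensive list(data) copy.
assert strand_sort(list(data)) == [0, 2, 4, 5, 5, 10, 33]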
from __future__ import annotations import math __lowerCamelCase : Tuple = '''2020.9.26''' __lowerCamelCase : Any = '''xcodz-dot, cclaus, dhruvmanila''' def _snake_case ( lowerCAmelCase : float , lowerCAmelCase : float , lowerCAmelCase : float , lowerCAmelCase : float , lowerCAmelCase : float ): """simple docstring""" if not all(isinstance(lowerCAmelCase , (float, int) ) for val in locals().values() ): SCREAMING_SNAKE_CASE_ : List[Any] = f'Input values must either be float or int: {list(locals().values() )}' raise TypeError(lowerCAmelCase ) SCREAMING_SNAKE_CASE_ : List[str] = ((x * distance) / (z + distance)) * scale SCREAMING_SNAKE_CASE_ : int = ((y * distance) / (z + distance)) * scale return projected_x, projected_y def _snake_case ( lowerCAmelCase : float , lowerCAmelCase : float , lowerCAmelCase : float , lowerCAmelCase : str , lowerCAmelCase : float ): """simple docstring""" if not isinstance(lowerCAmelCase , lowerCAmelCase ): raise TypeError("Axis must be a str" ) SCREAMING_SNAKE_CASE_ : Any = locals() del input_variables["axis"] if not all(isinstance(lowerCAmelCase , (float, int) ) for val in input_variables.values() ): SCREAMING_SNAKE_CASE_ : Union[str, Any] = ( "Input values except axis must either be float or int: " f'{list(input_variables.values() )}' ) raise TypeError(lowerCAmelCase ) SCREAMING_SNAKE_CASE_ : List[str] = (angle % 3_6_0) / 4_5_0 * 1_8_0 / math.pi if axis == "z": SCREAMING_SNAKE_CASE_ : Any = x * math.cos(lowerCAmelCase ) - y * math.sin(lowerCAmelCase ) SCREAMING_SNAKE_CASE_ : int = y * math.cos(lowerCAmelCase ) + x * math.sin(lowerCAmelCase ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = z elif axis == "x": SCREAMING_SNAKE_CASE_ : str = y * math.cos(lowerCAmelCase ) - z * math.sin(lowerCAmelCase ) SCREAMING_SNAKE_CASE_ : Dict = z * math.cos(lowerCAmelCase ) + y * math.sin(lowerCAmelCase ) SCREAMING_SNAKE_CASE_ : Optional[int] = x elif axis == "y": SCREAMING_SNAKE_CASE_ : Any = x * math.cos(lowerCAmelCase ) - z * math.sin(lowerCAmelCase ) SCREAMING_SNAKE_CASE_ : int = z * math.cos(lowerCAmelCase ) + x * math.sin(lowerCAmelCase ) SCREAMING_SNAKE_CASE_ : str = y else: raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'" ) return new_x, new_y, new_z if __name__ == "__main__": import doctest doctest.testmod() print(f'''{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }''') print(f'''{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }''')
18
"""simple docstring""" import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class snake_case__ : def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=50 , lowerCamelCase=0.02 , lowerCamelCase=True , lowerCamelCase=None , ): __a = parent __a = batch_size __a = seq_length __a = is_training __a = use_input_mask __a = vocab_size __a = hidden_size __a = num_hidden_layers __a = num_attention_heads __a = intermediate_size __a = hidden_act __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = max_position_embeddings __a = initializer_range __a = use_labels __a = scope def a__ ( self ): __a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a = None if self.use_input_mask: __a = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: __a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a = self.get_config() return config, input_ids, input_mask, token_labels def a__ ( self ): return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , ) def a__ ( self ): ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) = self.prepare_config_and_inputs() __a = True __a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ): __a = BertGenerationEncoder(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() __a = model(lowerCamelCase , attention_mask=lowerCamelCase ) __a = model(lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ): __a = True __a = BertGenerationEncoder(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() __a = model( lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , ) __a = model( lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a__ ( self , 
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ): __a = True __a = True __a = BertGenerationDecoder(config=lowerCamelCase ).to(lowerCamelCase ).eval() # first forward pass __a = model( lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , use_cache=lowerCamelCase , ) __a = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __a = ids_tensor((self.batch_size, 3) , config.vocab_size ) __a = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __a = torch.cat([input_ids, next_tokens] , dim=-1 ) __a = torch.cat([input_mask, next_mask] , dim=-1 ) __a = model( lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , output_hidden_states=lowerCamelCase , )["hidden_states"][0] __a = model( lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , output_hidden_states=lowerCamelCase , )["hidden_states"][0] # select random slice __a = ids_tensor((1,) , output_from_past.shape[-1] ).item() __a = output_from_no_past[:, -3:, random_slice_idx].detach() __a = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) ) def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , *lowerCamelCase , ): __a = BertGenerationDecoder(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() __a = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a__ ( self ): __a , __a , __a , __a = self.prepare_config_and_inputs() __a = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ): _snake_case : Union[str, Any] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () _snake_case : Any = (BertGenerationDecoder,) if is_torch_available() else () _snake_case : Union[str, Any] = ( {"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder} if is_torch_available() else {} ) def a__ ( self ): __a = BertGenerationEncoderTester(self ) __a = ConfigTester(self , config_class=lowerCamelCase , hidden_size=37 ) def a__ ( self ): self.config_tester.run_common_tests() def a__ ( self ): __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase ) def a__ ( self ): __a , __a , __a , __a = self.model_tester.prepare_config_and_inputs() __a = "bert" self.model_tester.create_and_check_model(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) def a__ ( self ): __a = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase ) def a__ ( self ): __a = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCamelCase ) def a__ ( self ): # This regression test was failing with PyTorch < 1.3 ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( 
__a ) , ( __a ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() __a = None self.model_tester.create_and_check_model_as_decoder( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) def a__ ( self ): __a = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase ) @slow def a__ ( self ): __a = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) self.assertIsNotNone(lowerCamelCase ) @require_torch class snake_case__ ( unittest.TestCase ): @slow def a__ ( self ): __a = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) __a = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] ) with torch.no_grad(): __a = model(lowerCamelCase )[0] __a = torch.Size([1, 8, 1024] ) self.assertEqual(output.shape , lowerCamelCase ) __a = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) ) @require_torch class snake_case__ ( unittest.TestCase ): @slow def a__ ( self ): __a = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) __a = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] ) with torch.no_grad(): __a = model(lowerCamelCase )[0] __a = torch.Size([1, 8, 50358] ) self.assertEqual(output.shape , lowerCamelCase ) __a = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) )
261
0
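For comparison with the rotate() in the row above, which keeps the source's non-standard angle scaling of (angle % 360) / 450 * 180 / pi, here is the textbook 2-D rotation in degrees. This is an illustrative aside, not a replacement for the dump's function.

import math

def rotate_2d(x: float, y: float, degrees: float) -> tuple:
    t = math.radians(degrees)
    return x * math.cos(t) - y * math.sin(t), x * math.sin(t) + y * math.cos(t)

# A 90-degree rotation maps the x unit vector onto the y unit vector.
assert [round(v, 9) for v in rotate_2d(1.0, 0.0, 90.0)] == [0.0, 1.0]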
import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available from . import BaseDiffusersCLICommand def lowerCamelCase_ ( lowerCamelCase__ ): return EnvironmentCommand() class _SCREAMING_SNAKE_CASE ( snake_case_ ): @staticmethod def SCREAMING_SNAKE_CASE_( lowercase ) -> int: lowerCamelCase_ = parser.add_parser("env" ) download_parser.set_defaults(func=lowercase ) def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]: lowerCamelCase_ = huggingface_hub.__version__ lowerCamelCase_ = "not installed" lowerCamelCase_ = "NA" if is_torch_available(): import torch lowerCamelCase_ = torch.__version__ lowerCamelCase_ = torch.cuda.is_available() lowerCamelCase_ = "not installed" if is_transformers_available(): import transformers lowerCamelCase_ = transformers.__version__ lowerCamelCase_ = "not installed" if is_accelerate_available(): import accelerate lowerCamelCase_ = accelerate.__version__ lowerCamelCase_ = "not installed" if is_xformers_available(): import xformers lowerCamelCase_ = xformers.__version__ lowerCamelCase_ = { "`diffusers` version": version, "Platform": platform.platform(), "Python version": platform.python_version(), "PyTorch version (GPU?)": f'{pt_version} ({pt_cuda_available})', "Huggingface_hub version": hub_version, "Transformers version": transformers_version, "Accelerate version": accelerate_version, "xFormers version": xformers_version, "Using GPU in script?": "<fill in>", "Using distributed or parallel set-up in script?": "<fill in>", } print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" ) print(self.format_dict(lowercase ) ) return info @staticmethod def SCREAMING_SNAKE_CASE_( lowercase ) -> List[Any]: return "\n".join([f'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
19
"""simple docstring""" # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401 from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401 deprecate( """stable diffusion controlnet""", """0.22.0""", """Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""", standard_warn=False, stacklevel=3, )
261
0
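The environment command above follows a common probe-and-report pattern: try each optional import, fall back to "not installed", then pretty-print one line per property. A stand-alone sketch of that pattern:

import platform

info = {"Platform": platform.platform(), "Python version": platform.python_version()}
try:
    import torch

    info["PyTorch version (GPU?)"] = f"{torch.__version__} ({torch.cuda.is_available()})"
except ImportError:
    info["PyTorch version (GPU?)"] = "not installed"
print("\n".join(f"- {prop}: {val}" for prop, val in info.items()))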
import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse("""0.8.3"""): raise Exception("""requires gluonnlp == 0.8.3""") if version.parse(mx.__version__) != version.parse("""1.5.0"""): raise Exception("""requires mxnet == 1.5.0""") logging.set_verbosity_info() lowercase : List[Any] = logging.get_logger(__name__) lowercase : List[str] = """The Nymphenburg Palace is a beautiful palace in Munich!""" def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[str]: lowercase : int = { """attention_cell""": """multi_head""", """num_layers""": 4, """units""": 1_024, """hidden_size""": 768, """max_length""": 512, """num_heads""": 8, """scaled""": True, """dropout""": 0.1, """use_residual""": True, """embed_size""": 1_024, """embed_dropout""": 0.1, """word_embed""": None, """layer_norm_eps""": 1e-5, """token_type_vocab_size""": 2, } lowercase : List[Any] = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py lowercase : Union[str, Any] = BERTEncoder( attention_cell=predefined_args["""attention_cell"""] , num_layers=predefined_args["""num_layers"""] , units=predefined_args["""units"""] , hidden_size=predefined_args["""hidden_size"""] , max_length=predefined_args["""max_length"""] , num_heads=predefined_args["""num_heads"""] , scaled=predefined_args["""scaled"""] , dropout=predefined_args["""dropout"""] , output_attention=SCREAMING_SNAKE_CASE__ , output_all_encodings=SCREAMING_SNAKE_CASE__ , use_residual=predefined_args["""use_residual"""] , activation=predefined_args.get("""activation""" , """gelu""" ) , layer_norm_eps=predefined_args.get("""layer_norm_eps""" , SCREAMING_SNAKE_CASE__ ) , ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later lowercase : Any = """openwebtext_ccnews_stories_books_cased""" # Specify download folder to Gluonnlp's vocab lowercase : Optional[int] = os.path.join(get_home_dir() , """models""" ) lowercase : List[Any] = _load_vocab(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cls=SCREAMING_SNAKE_CASE__ ) lowercase : int = nlp.model.BERTModel( SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) , units=predefined_args["""units"""] , embed_size=predefined_args["""embed_size"""] , embed_dropout=predefined_args["""embed_dropout"""] , word_embed=predefined_args["""word_embed"""] , use_pooler=SCREAMING_SNAKE_CASE__ , use_token_type_embed=SCREAMING_SNAKE_CASE__ , token_type_vocab_size=predefined_args["""token_type_vocab_size"""] , use_classifier=SCREAMING_SNAKE_CASE__ , use_decoder=SCREAMING_SNAKE_CASE__ , ) original_bort.load_parameters(SCREAMING_SNAKE_CASE__ , cast_dtype=SCREAMING_SNAKE_CASE__ , ignore_extra=SCREAMING_SNAKE_CASE__ ) lowercase : Optional[Any] = original_bort._collect_params_with_prefix() # Build our config 🤗 lowercase : Union[str, Any] = { """architectures""": ["""BertForMaskedLM"""], 
"""attention_probs_dropout_prob""": predefined_args["""dropout"""], """hidden_act""": """gelu""", """hidden_dropout_prob""": predefined_args["""dropout"""], """hidden_size""": predefined_args["""embed_size"""], """initializer_range""": 0.02, """intermediate_size""": predefined_args["""hidden_size"""], """layer_norm_eps""": predefined_args["""layer_norm_eps"""], """max_position_embeddings""": predefined_args["""max_length"""], """model_type""": """bort""", """num_attention_heads""": predefined_args["""num_heads"""], """num_hidden_layers""": predefined_args["""num_layers"""], """pad_token_id""": 1, # 2 = BERT, 1 = RoBERTa """type_vocab_size""": 1, # 2 = BERT, 1 = RoBERTa """vocab_size""": len(SCREAMING_SNAKE_CASE__ ), } lowercase : Dict = BertConfig.from_dict(SCREAMING_SNAKE_CASE__ ) lowercase : Any = BertForMaskedLM(SCREAMING_SNAKE_CASE__ ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(SCREAMING_SNAKE_CASE__ ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : Tuple = hf_param.shape lowercase : int = to_torch(params[gluon_param] ) lowercase : str = gluon_param.shape assert ( shape_hf == shape_gluon ), f"The gluon parameter 
{gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers" return gluon_param lowercase : List[str] = check_and_map_params( hf_bort_model.bert.embeddings.word_embeddings.weight , """word_embed.0.weight""" ) lowercase : Dict = check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight , """encoder.position_weight""" ) lowercase : Union[str, Any] = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.bias , """encoder.layer_norm.beta""" ) lowercase : int = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight , """encoder.layer_norm.gamma""" ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) lowercase : Tuple = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): lowercase : BertLayer = hf_bort_model.bert.encoder.layer[i] # self attention lowercase : BertSelfAttention = layer.attention.self lowercase : Any = check_and_map_params( self_attn.key.bias.data , f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias" ) lowercase : str = check_and_map_params( self_attn.key.weight.data , f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight" ) lowercase : Any = check_and_map_params( self_attn.query.bias.data , f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias" ) lowercase : List[str] = check_and_map_params( self_attn.query.weight.data , f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight" ) lowercase : List[str] = check_and_map_params( self_attn.value.bias.data , f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias" ) lowercase : Optional[Any] = check_and_map_params( self_attn.value.weight.data , f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight" ) # self attention output lowercase : BertSelfOutput = layer.attention.output lowercase : List[Any] = check_and_map_params( self_output.dense.bias , f"encoder.transformer_cells.{i}.proj.bias" ) lowercase : Optional[Any] = check_and_map_params( self_output.dense.weight , f"encoder.transformer_cells.{i}.proj.weight" ) lowercase : Union[str, Any] = check_and_map_params( self_output.LayerNorm.bias , f"encoder.transformer_cells.{i}.layer_norm.beta" ) lowercase : List[str] = check_and_map_params( self_output.LayerNorm.weight , f"encoder.transformer_cells.{i}.layer_norm.gamma" ) # intermediate lowercase : BertIntermediate = layer.intermediate lowercase : Dict = check_and_map_params( intermediate.dense.bias , f"encoder.transformer_cells.{i}.ffn.ffn_1.bias" ) lowercase : str = check_and_map_params( intermediate.dense.weight , f"encoder.transformer_cells.{i}.ffn.ffn_1.weight" ) # output lowercase : BertOutput = layer.output lowercase : Optional[Any] = check_and_map_params( bert_output.dense.bias , f"encoder.transformer_cells.{i}.ffn.ffn_2.bias" ) lowercase : int = check_and_map_params( bert_output.dense.weight , f"encoder.transformer_cells.{i}.ffn.ffn_2.weight" ) lowercase : str = check_and_map_params( bert_output.LayerNorm.bias , f"encoder.transformer_cells.{i}.ffn.layer_norm.beta" ) lowercase : List[str] = check_and_map_params( bert_output.LayerNorm.weight , f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma" ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models lowercase : List[Any] = RobertaTokenizer.from_pretrained("""roberta-base""" ) lowercase : int = tokenizer.encode_plus(SCREAMING_SNAKE_CASE__ )["""input_ids"""] # Get gluon output lowercase : Any = mx.nd.array([input_ids] ) 
lowercase : List[Any] = original_bort(inputs=SCREAMING_SNAKE_CASE__ , token_types=[] ) # Get Transformer output (save and reload model again) hf_bort_model.save_pretrained(SCREAMING_SNAKE_CASE__ ) lowercase : str = BertModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) hf_bort_model.eval() lowercase : Union[str, Any] = tokenizer.encode_plus(SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ) lowercase : List[str] = hf_bort_model(**SCREAMING_SNAKE_CASE__ )[0] lowercase : str = output_gluon[0].asnumpy() lowercase : Optional[int] = output_hf[0].detach().numpy() lowercase : Optional[Any] = np.max(np.abs(hf_layer - gluon_layer ) ).item() lowercase : List[str] = np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) if success: print("""✔️ Both model do output the same tensors""" ) else: print("""❌ Both model do **NOT** output the same tensors""" ) print("""Absolute difference is:""" , SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": lowercase : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path the official Bort params file.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) lowercase : Union[str, Any] = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
20
"""simple docstring""" import argparse import fairseq import torch from torch import nn from transformers import ( MBartaaTokenizer, MBartConfig, MBartForCausalLM, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() SCREAMING_SNAKE_CASE__:Any = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__:Any = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } SCREAMING_SNAKE_CASE__:Optional[int] = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def _lowerCamelCase( a , a , a , a , a ): for attribute in key.split("." ): __a = getattr(a , a ) if weight_type is not None: __a = getattr(a , a ).shape else: __a = hf_pointer.shape assert hf_shape == value.shape, ( F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" F" {value.shape} for {full_name}" ) if weight_type == "weight": __a = value elif weight_type == "weight_g": __a = value elif weight_type == "weight_v": __a = value elif weight_type == "bias": __a = value else: __a = value logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def _lowerCamelCase( a , a ): __a = [] __a = fairseq_model.state_dict() __a = hf_model.feature_extractor __a = hf_model.adapter for name, value in fairseq_dict.items(): __a = False if "conv_layers" in name: load_conv_layer( a , a , a , a , hf_model.config.feat_extract_norm == "group" , ) __a = True elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."] ): load_adapter(a , a , a , a ) __a = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: __a = True if "*" in mapped_key: __a = name.split(a )[0].split("." )[-2] __a = mapped_key.replace("*" , a ) if "weight_g" in name: __a = "weight_g" elif "weight_v" in name: __a = "weight_v" elif "bias" in name: __a = "bias" elif "weight" in name: __a = "weight" else: __a = None set_recursively(a , a , a , a , a ) continue if not is_used: unused_weights.append(a ) logger.warning(F"Unused weights: {unused_weights}" ) def _lowerCamelCase( a , a , a , a , a ): __a = full_name.split("conv_layers." )[-1] __a = name.split("." 
) __a = int(items[0] ) __a = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) __a = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) __a = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) __a = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." ) __a = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) else: unused_weights.append(a ) def _lowerCamelCase( a , a , a , a ): __a = full_name.split("adaptor." )[-1] __a = name.split("." ) if items[1].isdigit(): __a = int(items[1] ) else: __a = None if "adaptor" not in full_name: if "proj_ln" in full_name: # has to be layer norm if "bias" in name: assert ( value.shape == adapter.proj_layer_norm.bias.data.shape ), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found." __a = value logger.info(F"Adapter proj layer norm bias was initialized from {full_name}." ) if "weight" in name: assert ( value.shape == adapter.proj_layer_norm.weight.data.shape ), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found." __a = value else: # has to be projection layer if "bias" in name: assert ( value.shape == adapter.proj.bias.data.shape ), F"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found." __a = value logger.info(F"Adapter proj layer bias was initialized from {full_name}." ) if "weight" in name: assert ( value.shape == adapter.proj.weight.data.shape ), F"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found." __a = value logger.info(F"Adapter proj layer weight was initialized from {full_name}." ) elif isinstance(a , a ): if "bias" in name: assert ( value.shape == adapter.layers[layer_id].conv.bias.data.shape ), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found." __a = value logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." ) elif "weight" in name: assert ( value.shape == adapter.layers[layer_id].conv.weight.data.shape ), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found." __a = value logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." 
) else: unused_weights.append(a ) def _lowerCamelCase( a ): __a , __a = emb.weight.shape __a = nn.Linear(a , a , bias=a ) __a = emb.weight.data return lin_layer @torch.no_grad() def _lowerCamelCase( a , a , a , a , a , a , a , a , a , a , a , ): __a = WavaVecaConfig.from_pretrained( a , add_adapter=a , adapter_stride=a , adapter_kernel_size=a , use_auth_token=a , output_hidden_size=a , ) __a = MBartConfig.from_pretrained(a ) # load model __a , __a , __a = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={ "config_yaml": config_yaml_path, "data": "/".join(dict_path.split("/" )[:-1] ), "w2v_path": checkpoint_path, "load_pretrained_decoder_from": None, } , ) __a = model[0].eval() # load feature extractor __a = WavaVecaFeatureExtractor.from_pretrained(a , use_auth_token=a ) # set weights for wav2vec2 encoder __a = WavaVecaModel(a ) recursively_load_weights_wavaveca(model.encoder , a ) # load decoder weights __a = MBartForCausalLM(a ) __a , __a = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=a ) logger.warning(F"The following keys are missing when loading the decoder weights: {missing_keys}" ) logger.warning(F"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" ) __a = SpeechEncoderDecoderModel(encoder=a , decoder=a ) __a = False __a = MBartaaTokenizer(a ) tokenizer.save_pretrained(a ) __a = hf_wavavec.config.to_dict() __a = tokenizer.pad_token_id __a = tokenizer.bos_token_id __a = tokenizer.eos_token_id __a = "mbart50" __a = "wav2vec2" __a = tokenizer.eos_token_id __a = 2_5_0_0_0_4 __a = tokenizer.eos_token_id __a = SpeechEncoderDecoderConfig.from_dict(a ) hf_wavavec.save_pretrained(a ) feature_extractor.save_pretrained(a ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__:int = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""") parser.add_argument( """--encoder_config_path""", default="""facebook/wav2vec2-xls-r-1b""", type=str, help="""Path to hf encoder wav2vec2 checkpoint config""", ) parser.add_argument( """--decoder_config_path""", default="""facebook/mbart-large-50-one-to-many-mmt""", type=str, help="""Path to hf decoder checkpoint config""", ) parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""") parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""") parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""") parser.add_argument("""--encoder_output_dim""", default=1024, type=int, help="""encoder output dim""") parser.add_argument("""--start_token_id""", default=250004, type=int, help="""`decoder_start_token_id` of model config""") SCREAMING_SNAKE_CASE__:List[Any] = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, args.config_yaml_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, add_adapter=args.add_adapter, adapter_kernel_size=args.adapter_kernel_size, 
adapter_stride=args.adapter_stride, decoder_start_token_id=args.start_token_id, encoder_output_dim=args.encoder_output_dim, )
261
0
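Both checkpoint-conversion scripts above repeat one core move: assert that source and target tensors have the same shape, then copy the data across. A distilled sketch of that pattern (the helper name is illustrative):

import torch
from torch import nn

def copy_param(target: nn.Parameter, source: torch.Tensor, name: str) -> None:
    # Fail loudly on shape drift before overwriting any weights.
    assert target.shape == source.shape, (
        f"{name}: target {tuple(target.shape)} vs source {tuple(source.shape)}"
    )
    target.data.copy_(source)

layer = nn.Linear(4, 4)
copy_param(layer.weight, torch.zeros(4, 4), "layer.weight")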
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE : Optional[Any] = "▁" SCREAMING_SNAKE_CASE : Dict = {"vocab_file": "spiece.model"} SCREAMING_SNAKE_CASE : int = { "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"} } SCREAMING_SNAKE_CASE : Optional[Any] = { "google/pegasus-xsum": 512, } SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) class _lowerCamelCase( _a ): lowercase_ : List[Any] = VOCAB_FILES_NAMES lowercase_ : Any = VOCAB_FILES_NAMES lowercase_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP lowercase_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ : int = ["""input_ids""", """attention_mask"""] def __init__( self, lowerCamelCase, lowerCamelCase="<pad>", lowerCamelCase="</s>", lowerCamelCase="<unk>", lowerCamelCase="<mask_2>", lowerCamelCase="<mask_1>", lowerCamelCase=None, lowerCamelCase=1_03, lowerCamelCase = None, **lowerCamelCase, ) -> None: """simple docstring""" _lowercase : Any = offset if additional_special_tokens is not None: if not isinstance(lowerCamelCase, lowerCamelCase): raise TypeError( F'''additional_special_tokens should be of type {type(lowerCamelCase)}, but is''' F''' {type(lowerCamelCase)}''') _lowercase : Optional[Any] = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ F'''<unk_{i}>''' for i in range(len(lowerCamelCase), self.offset - 1) ] if len(set(lowerCamelCase)) != len(lowerCamelCase): raise ValueError( 'Please make sure that the provided additional_special_tokens do not contain an incorrectly' F''' shifted list of <unk_x> tokens. 
Found {additional_special_tokens_extended}.''') _lowercase : int = additional_special_tokens_extended else: _lowercase : List[Any] = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [F'''<unk_{i}>''' for i in range(2, self.offset)] _lowercase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=lowerCamelCase, unk_token=lowerCamelCase, mask_token=lowerCamelCase, pad_token=lowerCamelCase, mask_token_sent=lowerCamelCase, offset=lowerCamelCase, additional_special_tokens=lowerCamelCase, sp_model_kwargs=self.sp_model_kwargs, **lowerCamelCase, ) _lowercase : Optional[int] = mask_token_sent _lowercase : Optional[int] = vocab_file _lowercase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(lowerCamelCase) # add special tokens to encoder dict _lowercase : Dict[int, str] = { 0: self.pad_token, 1: self.eos_token, } if self.mask_token_sent is not None: self.encoder.update( { 2: self.mask_token_sent, 3: self.mask_token, }) if self.offset > 0: # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102 # mask_token_sent is already added to list -> so start at 1 self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)}) _lowercase : Dict[str, int] = {v: k for k, v in self.encoder.items()} @property def UpperCamelCase ( self) -> int: """simple docstring""" return len(self.sp_model) + self.offset def UpperCamelCase ( self) -> Dict[str, int]: """simple docstring""" _lowercase : Union[str, Any] = {self.convert_ids_to_tokens(lowerCamelCase): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self) -> Union[str, Any]: """simple docstring""" _lowercase : List[Any] = self.__dict__.copy() _lowercase : List[str] = None return state def __setstate__( self, lowerCamelCase) -> Dict: """simple docstring""" _lowercase : Union[str, Any] = d # for backward compatibility if not hasattr(self, 'sp_model_kwargs'): _lowercase : Optional[Any] = {} _lowercase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def UpperCamelCase ( self, lowerCamelCase) -> List[str]: """simple docstring""" return self.sp_model.encode(lowerCamelCase, out_type=lowerCamelCase) def UpperCamelCase ( self, lowerCamelCase) -> int: """simple docstring""" if token in self.decoder: return self.decoder[token] elif token in self.added_tokens_decoder: return self.added_tokens_decoder[token] _lowercase : str = self.sp_model.piece_to_id(lowerCamelCase) return sp_id + self.offset def UpperCamelCase ( self, lowerCamelCase) -> str: """simple docstring""" if index in self.encoder: return self.encoder[index] elif index in self.added_tokens_encoder: return self.added_tokens_encoder[index] else: _lowercase : Optional[int] = self.sp_model.IdToPiece(index - self.offset) return token def UpperCamelCase ( self, lowerCamelCase) -> Any: """simple docstring""" _lowercase : List[str] = [] _lowercase : Union[str, Any] = '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(lowerCamelCase) + token _lowercase : str = [] else: current_sub_tokens.append(lowerCamelCase) out_string += self.sp_model.decode(lowerCamelCase) return out_string.strip() def UpperCamelCase ( self, lowerCamelCase=False) -> Dict: """simple docstring""" return 1 def UpperCamelCase ( self, lowerCamelCase) -> str: 
"""simple docstring""" _lowercase : str = set(self.all_special_ids) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special return [1 if x in all_special_ids else 0 for x in seq] def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = False) -> List[int]: """simple docstring""" if already_has_special_tokens: return self._special_token_mask(lowerCamelCase) elif token_ids_a is None: return self._special_token_mask(lowerCamelCase) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a) + [1] def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=None) -> List[int]: """simple docstring""" if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None) -> Tuple[str]: """simple docstring""" if not os.path.isdir(lowerCamelCase): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''') return _lowercase : Any = os.path.join( lowerCamelCase, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCamelCase) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, lowerCamelCase) elif not os.path.isfile(self.vocab_file): with open(lowerCamelCase, 'wb') as fi: _lowercase : int = self.sp_model.serialized_model_proto() fi.write(lowerCamelCase) return (out_vocab_file,)
21
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) SCREAMING_SNAKE_CASE__:str = { """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""], """processing_trocr""": ["""TrOCRProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__:Tuple = [ """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""", """TrOCRForCausalLM""", """TrOCRPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys SCREAMING_SNAKE_CASE__:List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring''' import warnings from ...utils import is_sklearn_available, requires_backends if is_sklearn_available(): from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef __SCREAMING_SNAKE_CASE :List[str] = ( '''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate ''' '''library. You can have a look at this example script for pointers: ''' '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' ) def UpperCAmelCase_ ( __lowercase : Any , __lowercase : Tuple ) -> int: '''simple docstring''' warnings.warn(__lowercase , __lowercase ) requires_backends(__lowercase , "sklearn" ) return (preds == labels).mean() def UpperCAmelCase_ ( __lowercase : int , __lowercase : str ) -> Optional[Any]: '''simple docstring''' warnings.warn(__lowercase , __lowercase ) requires_backends(__lowercase , "sklearn" ) _UpperCAmelCase = simple_accuracy(__lowercase , __lowercase ) _UpperCAmelCase = fa_score(y_true=__lowercase , y_pred=__lowercase ) return { "acc": acc, "f1": fa, "acc_and_f1": (acc + fa) / 2, } def UpperCAmelCase_ ( __lowercase : Optional[int] , __lowercase : List[str] ) -> List[Any]: '''simple docstring''' warnings.warn(__lowercase , __lowercase ) requires_backends(__lowercase , "sklearn" ) _UpperCAmelCase = pearsonr(__lowercase , __lowercase )[0] _UpperCAmelCase = spearmanr(__lowercase , __lowercase )[0] return { "pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2, } def UpperCAmelCase_ ( __lowercase : Optional[Any] , __lowercase : str , __lowercase : str ) -> Tuple: '''simple docstring''' warnings.warn(__lowercase , __lowercase ) requires_backends(__lowercase , "sklearn" ) assert len(__lowercase ) == len(__lowercase ), f'Predictions and labels have mismatched lengths {len(__lowercase )} and {len(__lowercase )}' if task_name == "cola": return {"mcc": matthews_corrcoef(__lowercase , __lowercase )} elif task_name == "sst-2": return {"acc": simple_accuracy(__lowercase , __lowercase )} elif task_name == "mrpc": return acc_and_fa(__lowercase , __lowercase ) elif task_name == "sts-b": return pearson_and_spearman(__lowercase , __lowercase ) elif task_name == "qqp": return acc_and_fa(__lowercase , __lowercase ) elif task_name == "mnli": return {"mnli/acc": simple_accuracy(__lowercase , __lowercase )} elif task_name == "mnli-mm": return {"mnli-mm/acc": simple_accuracy(__lowercase , __lowercase )} elif task_name == "qnli": return {"acc": simple_accuracy(__lowercase , __lowercase )} elif task_name == "rte": return {"acc": simple_accuracy(__lowercase , __lowercase )} elif task_name == "wnli": return {"acc": simple_accuracy(__lowercase , __lowercase )} elif task_name == "hans": return {"acc": simple_accuracy(__lowercase , __lowercase )} else: raise KeyError(__lowercase ) def UpperCAmelCase_ ( __lowercase : List[Any] , __lowercase : Dict , __lowercase : str ) -> Union[str, Any]: '''simple docstring''' warnings.warn(__lowercase , __lowercase ) requires_backends(__lowercase , "sklearn" ) if len(__lowercase ) != len(__lowercase ): raise ValueError(f'Predictions and labels have mismatched lengths {len(__lowercase )} and {len(__lowercase )}' ) if task_name == "xnli": return {"acc": simple_accuracy(__lowercase , __lowercase )} else: raise KeyError(__lowercase )
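# A quick sanity check of the accuracy / F1 combination computed above, using
# sklearn's real f1_score name and invented predictions:
import numpy as np
from sklearn.metrics import f1_score

preds = np.array([1, 0, 1, 1])
labels = np.array([1, 0, 0, 1])
acc = (preds == labels).mean()                  # 3/4 = 0.75
f1 = f1_score(y_true=labels, y_pred=preds)      # P=2/3, R=1 -> F1 = 0.8
print({"acc": acc, "f1": f1, "acc_and_f1": (acc + f1) / 2})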
"""simple docstring""" import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO, ) SCREAMING_SNAKE_CASE__:Any = logging.getLogger(__name__) def _lowerCamelCase( a ): __a = git.Repo(search_parent_directories=a ) __a = { "repo_id": str(a ), "repo_sha": str(repo.head.object.hexsha ), "repo_branch": str(repo.active_branch ), } with open(os.path.join(a , "git_log.json" ) , "w" ) as f: json.dump(a , a , indent=4 ) def _lowerCamelCase( a ): if params.n_gpu <= 0: __a = 0 __a = -1 __a = True __a = False return assert torch.cuda.is_available() logger.info("Initializing GPUs" ) if params.n_gpu > 1: assert params.local_rank != -1 __a = int(os.environ["WORLD_SIZE"] ) __a = int(os.environ["N_GPU_NODE"] ) __a = int(os.environ["RANK"] ) # number of nodes / node ID __a = params.world_size // params.n_gpu_per_node __a = params.global_rank // params.n_gpu_per_node __a = True assert params.n_nodes == int(os.environ["N_NODES"] ) assert params.node_id == int(os.environ["NODE_RANK"] ) # local job (single GPU) else: assert params.local_rank == -1 __a = 1 __a = 0 __a = 0 __a = 0 __a = 1 __a = 1 __a = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode __a = params.node_id == 0 and params.local_rank == 0 __a = params.n_nodes > 1 # summary __a = F"--- Global rank: {params.global_rank} - " logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes ) logger.info(PREFIX + "Node ID : %i" % params.node_id ) logger.info(PREFIX + "Local rank : %i" % params.local_rank ) logger.info(PREFIX + "World size : %i" % params.world_size ) logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node ) logger.info(PREFIX + "Master : %s" % str(params.is_master ) ) logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node ) ) logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu ) ) logger.info(PREFIX + "Hostname : %s" % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info("Initializing PyTorch distributed" ) torch.distributed.init_process_group( init_method="env://" , backend="nccl" , ) def _lowerCamelCase( a ): np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase__: int = logging.get_logger(__name__) UpperCamelCase__: Tuple = { "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json", "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json", "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json", "funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json", "funnel-transformer/intermediate": ( "https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json" ), "funnel-transformer/intermediate-base": ( "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json" ), "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json", "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json", "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json", "funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json", } class SCREAMING_SNAKE_CASE( A__ ): """simple docstring""" lowerCamelCase__ = """funnel""" lowerCamelCase__ = { """hidden_size""": """d_model""", """num_attention_heads""": """n_head""", } def __init__( self : Dict , __snake_case : Optional[Any]=30522 , __snake_case : List[str]=[4, 4, 4] , __snake_case : Dict=None , __snake_case : Union[str, Any]=2 , __snake_case : Optional[int]=768 , __snake_case : Dict=12 , __snake_case : Any=64 , __snake_case : Any=3072 , __snake_case : Any="gelu_new" , __snake_case : Any=0.1 , __snake_case : Union[str, Any]=0.1 , __snake_case : str=0.0 , __snake_case : Tuple=0.1 , __snake_case : Dict=None , __snake_case : Optional[int]=1E-9 , __snake_case : Tuple="mean" , __snake_case : List[str]="relative_shift" , __snake_case : Dict=True , __snake_case : int=True , __snake_case : Optional[Any]=True , **__snake_case : Optional[Any] , ) -> Optional[int]: UpperCAmelCase : Optional[int] = vocab_size UpperCAmelCase : List[Any] = block_sizes UpperCAmelCase : Optional[int] = [1] * len(__snake_case ) if block_repeats is None else block_repeats assert len(__snake_case ) == len( self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length." 
UpperCAmelCase : Optional[int] = num_decoder_layers UpperCAmelCase : List[Any] = d_model UpperCAmelCase : List[str] = n_head UpperCAmelCase : Optional[Any] = d_head UpperCAmelCase : Union[str, Any] = d_inner UpperCAmelCase : str = hidden_act UpperCAmelCase : int = hidden_dropout UpperCAmelCase : List[Any] = attention_dropout UpperCAmelCase : Any = activation_dropout UpperCAmelCase : str = initializer_range UpperCAmelCase : List[Any] = initializer_std UpperCAmelCase : Tuple = layer_norm_eps assert pooling_type in [ "mean", "max", ], F"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported.""" UpperCAmelCase : Optional[Any] = pooling_type assert attention_type in [ "relative_shift", "factorized", ], F"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported.""" UpperCAmelCase : Optional[int] = attention_type UpperCAmelCase : str = separate_cls UpperCAmelCase : List[str] = truncate_seq UpperCAmelCase : Tuple = pool_q_only super().__init__(**__snake_case ) @property def A ( self : int ) -> Optional[int]: return sum(self.block_sizes ) @num_hidden_layers.setter def A ( self : List[Any] , __snake_case : List[str] ) -> Union[str, Any]: raise NotImplementedError( '''This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.''' ) @property def A ( self : Dict ) -> int: return len(self.block_sizes ) @num_blocks.setter def A ( self : Dict , __snake_case : Optional[Any] ) -> Any: raise NotImplementedError('''This model does not support the setting of `num_blocks`. Please set `block_sizes`.''' )
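# Assuming the upstream transformers package is installed, the derived
# properties implemented above behave like this (FunnelConfig is the public
# class this renamed copy mirrors):
from transformers import FunnelConfig

cfg = FunnelConfig(block_sizes=[4, 4, 4], block_repeats=[1, 1, 1])
assert cfg.num_hidden_layers == sum(cfg.block_sizes)   # 12
assert cfg.num_blocks == len(cfg.block_sizes)          # 3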
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available SCREAMING_SNAKE_CASE__:List[str] = {"""configuration_van""": ["""VAN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VanConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__:Optional[Any] = [ """VAN_PRETRAINED_MODEL_ARCHIVE_LIST""", """VanForImageClassification""", """VanModel""", """VanPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_van import ( VAN_PRETRAINED_MODEL_ARCHIVE_LIST, VanForImageClassification, VanModel, VanPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__:Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
from __future__ import annotations from collections.abc import Callable from typing import Any, Generic, TypeVar snake_case_ = TypeVar('T') class SCREAMING_SNAKE_CASE__ ( Generic[T] ): def __init__(self : List[Any] , a__ : list[T] , a__ : Callable[[T, T], T] ): """simple docstring""" __snake_case = None __snake_case = len(a__ ) __snake_case = [any_type for _ in range(self.N )] + arr __snake_case = fnc self.build() def a (self : Dict ): """simple docstring""" for p in range(self.N - 1 , 0 , -1 ): __snake_case = self.fn(self.st[p * 2] , self.st[p * 2 + 1] ) def a (self : Any , a__ : int , a__ : T ): """simple docstring""" p += self.N __snake_case = v while p > 1: __snake_case = p // 2 __snake_case = self.fn(self.st[p * 2] , self.st[p * 2 + 1] ) def a (self : int , a__ : int , a__ : int ): # noqa: E741 """simple docstring""" __snake_case , __snake_case = l + self.N, r + self.N __snake_case = None while l <= r: if l % 2 == 1: __snake_case = self.st[l] if res is None else self.fn(a__ , self.st[l] ) if r % 2 == 0: __snake_case = self.st[r] if res is None else self.fn(a__ , self.st[r] ) __snake_case , __snake_case = (l + 1) // 2, (r - 1) // 2 return res if __name__ == "__main__": from functools import reduce snake_case_ = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12] snake_case_ = { 0: 7, 1: 2, 2: 6, 3: -14, 4: 5, 5: 4, 6: 7, 7: -10, 8: 9, 9: 10, 10: 12, 11: 1, } snake_case_ = SegmentTree(test_array, min) snake_case_ = SegmentTree(test_array, max) snake_case_ = SegmentTree(test_array, lambda a, b: a + b) def lowerCamelCase__ ( ) -> None: for i in range(len(snake_case_ ) ): for j in range(snake_case_ , len(snake_case_ ) ): __snake_case = reduce(snake_case_ , test_array[i : j + 1] ) __snake_case = reduce(snake_case_ , test_array[i : j + 1] ) __snake_case = reduce(lambda snake_case_ , snake_case_ : a + b , test_array[i : j + 1] ) assert min_range == min_segment_tree.query(snake_case_ , snake_case_ ) assert max_range == max_segment_tree.query(snake_case_ , snake_case_ ) assert sum_range == sum_segment_tree.query(snake_case_ , snake_case_ ) test_all_segments() for index, value in test_updates.items(): snake_case_ = value min_segment_tree.update(index, value) max_segment_tree.update(index, value) sum_segment_tree.update(index, value) test_all_segments()
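# A worked miniature (array invented) of the implicit-heap layout the class
# above builds: st[1] is the root and the leaves occupy indices N..2N-1.
arr = [5, 2, 7, 1]
N = len(arr)
st = [0] * N + arr                       # st = [0, 0, 0, 0, 5, 2, 7, 1]
for p in range(N - 1, 0, -1):
    st[p] = min(st[2 * p], st[2 * p + 1])
assert st[1] == 1                        # root holds the minimum of arr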
"""simple docstring""" from __future__ import annotations from typing import Any class snake_case__ : def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = 0 ): __a , __a = row, column __a = [[default_value for c in range(lowerCamelCase )] for r in range(lowerCamelCase )] def __str__( self ): __a = F"Matrix consist of {self.row} rows and {self.column} columns\n" # Make string identifier __a = 0 for row_vector in self.array: for obj in row_vector: __a = max(lowerCamelCase , len(str(lowerCamelCase ) ) ) __a = F"%{max_element_length}s" # Make string and return def single_line(lowerCamelCase ) -> str: nonlocal string_format_identifier __a = "[" line += ", ".join(string_format_identifier % (obj,) for obj in row_vector ) line += "]" return line s += "\n".join(single_line(lowerCamelCase ) for row_vector in self.array ) return s def __repr__( self ): return str(self ) def a__ ( self , lowerCamelCase ): if not (isinstance(lowerCamelCase , (list, tuple) ) and len(lowerCamelCase ) == 2): return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False else: return True def __getitem__( self , lowerCamelCase ): assert self.validate_indicies(lowerCamelCase ) return self.array[loc[0]][loc[1]] def __setitem__( self , lowerCamelCase , lowerCamelCase ): assert self.validate_indicies(lowerCamelCase ) __a = value def __add__( self , lowerCamelCase ): assert isinstance(lowerCamelCase , lowerCamelCase ) assert self.row == another.row and self.column == another.column # Add __a = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __a = self[r, c] + another[r, c] return result def __neg__( self ): __a = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __a = -self[r, c] return result def __sub__( self , lowerCamelCase ): return self + (-another) def __mul__( self , lowerCamelCase ): if isinstance(lowerCamelCase , (int, float) ): # Scalar multiplication __a = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __a = self[r, c] * another return result elif isinstance(lowerCamelCase , lowerCamelCase ): # Matrix multiplication assert self.column == another.row __a = Matrix(self.row , another.column ) for r in range(self.row ): for c in range(another.column ): for i in range(self.column ): result[r, c] += self[r, i] * another[i, c] return result else: __a = F"Unsupported type given for another ({type(lowerCamelCase )})" raise TypeError(lowerCamelCase ) def a__ ( self ): __a = Matrix(self.column , self.row ) for r in range(self.row ): for c in range(self.column ): __a = self[r, c] return result def a__ ( self , lowerCamelCase , lowerCamelCase ): assert isinstance(lowerCamelCase , lowerCamelCase ) and isinstance(lowerCamelCase , lowerCamelCase ) assert self.row == self.column == u.row == v.row # u, v should be column vector assert u.column == v.column == 1 # u, v should be column vector # Calculate __a = v.transpose() __a = (v_t * self * u)[0, 0] + 1 if numerator_factor == 0: return None # It's not invertable return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor)) # Testing if __name__ == "__main__": def _lowerCamelCase( ): # a^(-1) __a = Matrix(3 , 3 , 0 ) for i in range(3 ): __a = 1 print(F"a^(-1) is {ainv}" ) # u, v __a = Matrix(3 , 1 , 0 ) __a , __a , __a = 1, 2, -3 __a = Matrix(3 , 1 , 0 ) __a , __a , __a = 4, -2, 5 print(F"u is {u}" ) print(F"v is {v}" ) print(F"uv^T is {u * v.transpose()}" ) # Sherman Morrison print(F"(a + uv^T)^(-1) is 
{ainv.sherman_morrison(a , a )}" ) def _lowerCamelCase( ): import doctest doctest.testmod() testa()
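# Independent numpy check of the identity the sherman_morrison method encodes,
#     (A + u v^T)^-1 = A^-1 - (A^-1 u v^T A^-1) / (1 + v^T A^-1 u),
# remembering that the method is called on A^-1, not on A. The values match
# the test above; numpy stands in for the Matrix class.
import numpy as np

A_inv = np.eye(3)                                   # A = I, so A^-1 = I
u = np.array([[1.0], [2.0], [-3.0]])
v = np.array([[4.0], [-2.0], [5.0]])
lhs = np.linalg.inv(np.linalg.inv(A_inv) + u @ v.T)
rhs = A_inv - (A_inv @ u @ v.T @ A_inv) / (1.0 + float(v.T @ A_inv @ u))
assert np.allclose(lhs, rhs)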
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) UpperCAmelCase__ : Union[str, Any] = { 'configuration_swiftformer': [ 'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SwiftFormerConfig', 'SwiftFormerOnnxConfig', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : Optional[int] = [ 'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'SwiftFormerForImageClassification', 'SwiftFormerModel', 'SwiftFormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_swiftformer import ( SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, SwiftFormerConfig, SwiftFormerOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swiftformer import ( SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, SwiftFormerForImageClassification, SwiftFormerModel, SwiftFormerPreTrainedModel, ) else: import sys UpperCAmelCase__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def _lowerCamelCase( a , a , a , a , a=True , a="pt" ): __a = {"add_prefix_space": True} if isinstance(a , a ) and not line.startswith(" " ) else {} __a = padding_side return tokenizer( [line] , max_length=a , padding="max_length" if pad_to_max_length else None , truncation=a , return_tensors=a , add_special_tokens=a , **a , ) def _lowerCamelCase( a , a , a=None , ): __a = input_ids.ne(a ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class snake_case__ ( snake_case_ ): def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase="train" , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="" , ): super().__init__() __a = Path(lowerCamelCase ).joinpath(type_path + ".source" ) __a = Path(lowerCamelCase ).joinpath(type_path + ".target" ) __a = self.get_char_lens(self.src_file ) __a = max_source_length __a = max_target_length assert min(self.src_lens ) > 0, F"found empty line in {self.src_file}" __a = tokenizer __a = prefix if n_obs is not None: __a = self.src_lens[:n_obs] __a = src_lang __a = tgt_lang def __len__( self ): return len(self.src_lens ) def __getitem__( self , lowerCamelCase ): __a = index + 1 # linecache starts at 1 __a = self.prefix + linecache.getline(str(self.src_file ) , lowerCamelCase ).rstrip("\n" ) __a = linecache.getline(str(self.tgt_file ) , lowerCamelCase ).rstrip("\n" ) assert source_line, F"empty source line for index {index}" assert tgt_line, F"empty tgt line for index {index}" # Need to add eos token manually for T5 if isinstance(self.tokenizer , lowerCamelCase ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right __a = ( self.tokenizer.question_encoder if isinstance(self.tokenizer , lowerCamelCase ) else self.tokenizer ) __a = self.tokenizer.generator if isinstance(self.tokenizer , lowerCamelCase ) else self.tokenizer __a = encode_line(lowerCamelCase , lowerCamelCase , self.max_source_length , "right" ) __a = encode_line(lowerCamelCase , lowerCamelCase , self.max_target_length , "right" ) __a = source_inputs["input_ids"].squeeze() __a = target_inputs["input_ids"].squeeze() __a = source_inputs["attention_mask"].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def a__ ( lowerCamelCase ): return [len(lowerCamelCase ) for x in Path(lowerCamelCase ).open().readlines()] def a__ ( self , lowerCamelCase ): __a = torch.stack([x["input_ids"] for x in batch] ) __a = torch.stack([x["attention_mask"] for x in batch] ) __a = torch.stack([x["decoder_input_ids"] for x in batch] ) __a = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer , lowerCamelCase ) else self.tokenizer.pad_token_id ) __a = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer , lowerCamelCase ) else self.tokenizer.pad_token_id ) __a = trim_batch(lowerCamelCase , lowerCamelCase ) __a , __a = trim_batch(lowerCamelCase , lowerCamelCase , attention_mask=lowerCamelCase ) 
__a = { "input_ids": source_ids, "attention_mask": source_mask, "decoder_input_ids": y, } return batch SCREAMING_SNAKE_CASE__:Tuple = getLogger(__name__) def _lowerCamelCase( a ): return list(itertools.chain.from_iterable(a ) ) def _lowerCamelCase( a ): __a = get_git_info() save_json(a , os.path.join(a , "git_log.json" ) ) def _lowerCamelCase( a , a , a=4 , **a ): with open(a , "w" ) as f: json.dump(a , a , indent=a , **a ) def _lowerCamelCase( a ): with open(a ) as f: return json.load(a ) def _lowerCamelCase( ): __a = git.Repo(search_parent_directories=a ) __a = { "repo_id": str(a ), "repo_sha": str(repo.head.object.hexsha ), "repo_branch": str(repo.active_branch ), "hostname": str(socket.gethostname() ), } return repo_infos def _lowerCamelCase( a , a ): return list(map(a , a ) ) def _lowerCamelCase( a , a ): with open(a , "wb" ) as f: return pickle.dump(a , a ) def _lowerCamelCase( a ): def remove_articles(a ): return re.sub(R"\b(a|an|the)\b" , " " , a ) def white_space_fix(a ): return " ".join(text.split() ) def remove_punc(a ): __a = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(a ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(a ) ) ) ) def _lowerCamelCase( a , a ): __a = normalize_answer(a ).split() __a = normalize_answer(a ).split() __a = Counter(a ) & Counter(a ) __a = sum(common.values() ) if num_same == 0: return 0 __a = 1.0 * num_same / len(a ) __a = 1.0 * num_same / len(a ) __a = (2 * precision * recall) / (precision + recall) return fa def _lowerCamelCase( a , a ): return normalize_answer(a ) == normalize_answer(a ) def _lowerCamelCase( a , a ): assert len(a ) == len(a ) __a = 0 for hypo, pred in zip(a , a ): em += exact_match_score(a , a ) if len(a ) > 0: em /= len(a ) return {"em": em} def _lowerCamelCase( a ): return model_prefix.startswith("rag" ) def _lowerCamelCase( a , a , a ): __a = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead __a = "dropout_rate" for p in extra_params: if getattr(a , a , a ): if not hasattr(a , a ) and not hasattr(a , equivalent_param[p] ): logger.info("config doesn't have a `{}` attribute".format(a ) ) delattr(a , a ) continue __a = p if hasattr(a , a ) else equivalent_param[p] setattr(a , a , getattr(a , a ) ) delattr(a , a ) return hparams, config
import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class lowercase : def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=64 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ) -> Optional[int]: _A : Optional[int] = parent _A : Optional[Any] = batch_size _A : Tuple = seq_length _A : Optional[Any] = is_training _A : List[Any] = use_input_mask _A : Any = use_token_type_ids _A : List[Any] = use_labels _A : Optional[int] = vocab_size _A : str = hidden_size _A : List[str] = embedding_size _A : List[str] = num_hidden_layers _A : List[str] = num_attention_heads _A : Dict = intermediate_size _A : Dict = hidden_act _A : Any = hidden_dropout_prob _A : List[Any] = attention_probs_dropout_prob _A : str = max_position_embeddings _A : List[str] = type_vocab_size _A : int = type_sequence_label_size _A : Optional[Any] = initializer_range _A : Union[str, Any] = num_labels _A : Any = num_choices _A : List[str] = scope def a__ ( self ) -> Union[str, Any]: _A : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A : List[str] = None if self.use_input_mask: _A : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) _A : Union[str, Any] = None if self.use_token_type_ids: _A : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _A : Any = None _A : List[Any] = None _A : Optional[Any] = None if self.use_labels: _A : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A : List[str] = ids_tensor([self.batch_size] , self.num_choices ) _A : List[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def a__ ( self ) -> Optional[int]: return MegatronBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , ) def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> Any: _A : Tuple = MegatronBertModel(config=_a ) model.to(_a ) model.eval() _A : str = model(_a , attention_mask=_a , token_type_ids=_a ) _A : Any = model(_a , token_type_ids=_a ) _A : int = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> List[str]: _A : List[Any] = MegatronBertForMaskedLM(config=_a ) model.to(_a ) model.eval() _A : List[Any] = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> Union[str, Any]: _A : Optional[int] = MegatronBertForCausalLM(config=_a ) model.to(_a ) model.eval() _A : List[Any] = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> Union[str, Any]: _A : List[str] = MegatronBertForNextSentencePrediction(config=_a ) model.to(_a ) model.eval() _A : List[str] = model( _a , attention_mask=_a , token_type_ids=_a , labels=_a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> List[str]: _A : Dict = MegatronBertForPreTraining(config=_a ) model.to(_a ) model.eval() _A : str = model( _a , attention_mask=_a , token_type_ids=_a , labels=_a , next_sentence_label=_a , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> List[str]: _A : Dict = MegatronBertForQuestionAnswering(config=_a ) model.to(_a ) model.eval() _A : Dict = model( _a , attention_mask=_a , token_type_ids=_a , start_positions=_a , end_positions=_a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> Tuple: _A : Tuple = self.num_labels _A : Any = MegatronBertForSequenceClassification(_a ) model.to(_a ) model.eval() _A : str = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> int: _A : Union[str, Any] = self.num_labels _A : Optional[int] = MegatronBertForTokenClassification(config=_a ) model.to(_a ) model.eval() _A : Dict = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> Union[str, Any]: _A : str = self.num_choices _A : Optional[Any] = MegatronBertForMultipleChoice(config=_a ) model.to(_a ) model.eval() _A : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A : Optional[int] = model( _a , attention_mask=_a , token_type_ids=_a , labels=_a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def a__ ( self ) -> int: _A : Optional[int] = self.prepare_config_and_inputs() ( ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ) : int = config_and_inputs _A : Optional[int] = {"""input_ids""": input_ids, """token_type_ids""": 
token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowercase ( UpperCamelCase__,UpperCamelCase__,unittest.TestCase ): _a = ( ( MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if is_torch_available() else () ) _a = ( { "feature-extraction": MegatronBertModel, "fill-mask": MegatronBertForMaskedLM, "question-answering": MegatronBertForQuestionAnswering, "text-classification": MegatronBertForSequenceClassification, "text-generation": MegatronBertForCausalLM, "token-classification": MegatronBertForTokenClassification, "zero-shot": MegatronBertForSequenceClassification, } if is_torch_available() else {} ) _a = True # test_resize_embeddings = False _a = False def a__ ( self , _a , _a , _a=False ) -> Optional[int]: _A : List[str] = super()._prepare_for_class(_a , _a , return_labels=_a ) if return_labels: if model_class in get_values(_a ): _A : Any = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_a ) _A : Any = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_a ) return inputs_dict def a__ ( self ) -> Dict: _A : Dict = MegatronBertModelTester(self ) _A : str = ConfigTester(self , config_class=_a , hidden_size=37 ) def a__ ( self ) -> str: self.config_tester.run_common_tests() def a__ ( self ) -> Optional[int]: _A : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*_a ) def a__ ( self ) -> List[Any]: _A : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_masked_lm(*_a ) def a__ ( self ) -> Dict: _A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*_a ) def a__ ( self ) -> int: _A : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*_a ) def a__ ( self ) -> List[str]: _A : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*_a ) def a__ ( self ) -> int: _A : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_question_answering(*_a ) def a__ ( self ) -> Dict: _A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*_a ) def a__ ( self ) -> int: _A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_token_classification(*_a ) def lowerCAmelCase_ ( snake_case_ ): return torch.tensor( snake_case_,dtype=torch.long,device=snake_case_,) _snake_case = 1e-4 @require_torch @require_sentencepiece @require_tokenizers class lowercase ( unittest.TestCase ): @slow @unittest.skip("""Model is not available.""" ) def a__ ( self ) -> List[Any]: _A : Optional[Any] = """nvidia/megatron-bert-uncased-345m""" if "MYDIR" in os.environ: _A : List[str] = os.path.join(os.environ["""MYDIR"""] , _a ) _A : Any = MegatronBertModel.from_pretrained(_a ) model.to(_a ) model.half() _A : Optional[int] = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] ) with torch.no_grad(): _A : Dict = model(_a )[0] _A : 
List[str] = torch.Size((1, 9, 1024) ) self.assertEqual(output.shape , _a ) _A : Tuple = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728] for ii in range(3 ): for jj in range(3 ): _A : Any = output[0, ii, jj] _A : Optional[int] = expected[3 * ii + jj] _A : Tuple = """ii={} jj={} a={} b={}""".format(_a , _a , _a , _a ) self.assertTrue(math.isclose(_a , _a , rel_tol=_a , abs_tol=_a ) , msg=_a )
"""simple docstring""" from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .attention_processor import AttentionProcessor, AttnProcessor from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder @dataclass class snake_case__ ( snake_case_ ): _snake_case : "DiagonalGaussianDistribution" class snake_case__ ( snake_case_, snake_case_ ): _snake_case : Optional[Any] = True @register_to_config def __init__( self , lowerCamelCase = 3 , lowerCamelCase = 3 , lowerCamelCase = ("DownEncoderBlock2D",) , lowerCamelCase = ("UpDecoderBlock2D",) , lowerCamelCase = (64,) , lowerCamelCase = 1 , lowerCamelCase = "silu" , lowerCamelCase = 4 , lowerCamelCase = 32 , lowerCamelCase = 32 , lowerCamelCase = 0.1_8215 , ): super().__init__() # pass init params to Encoder __a = Encoder( in_channels=lowerCamelCase , out_channels=lowerCamelCase , down_block_types=lowerCamelCase , block_out_channels=lowerCamelCase , layers_per_block=lowerCamelCase , act_fn=lowerCamelCase , norm_num_groups=lowerCamelCase , double_z=lowerCamelCase , ) # pass init params to Decoder __a = Decoder( in_channels=lowerCamelCase , out_channels=lowerCamelCase , up_block_types=lowerCamelCase , block_out_channels=lowerCamelCase , layers_per_block=lowerCamelCase , norm_num_groups=lowerCamelCase , act_fn=lowerCamelCase , ) __a = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 ) __a = nn.Convad(lowerCamelCase , lowerCamelCase , 1 ) __a = False __a = False # only relevant if vae tiling is enabled __a = self.config.sample_size __a = ( self.config.sample_size[0] if isinstance(self.config.sample_size , (list, tuple) ) else self.config.sample_size ) __a = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) ) __a = 0.25 def a__ ( self , lowerCamelCase , lowerCamelCase=False ): if isinstance(lowerCamelCase , (Encoder, Decoder) ): __a = value def a__ ( self , lowerCamelCase = True ): __a = use_tiling def a__ ( self ): self.enable_tiling(lowerCamelCase ) def a__ ( self ): __a = True def a__ ( self ): __a = False @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def a__ ( self ): __a = {} def fn_recursive_add_processors(lowerCamelCase , lowerCamelCase , lowerCamelCase ): if hasattr(lowerCamelCase , "set_processor" ): __a = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(F"{name}.{sub_name}" , lowerCamelCase , lowerCamelCase ) return processors for name, module in self.named_children(): fn_recursive_add_processors(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return processors def a__ ( self , lowerCamelCase ): __a = len(self.attn_processors.keys() ) if isinstance(lowerCamelCase , lowerCamelCase ) and len(lowerCamelCase ) != count: raise ValueError( F"A dict of processors was passed, but the number of processors {len(lowerCamelCase )} does not match the" F" number of attention layers: {count}. Please make sure to pass {count} processor classes." 
) def fn_recursive_attn_processor(lowerCamelCase , lowerCamelCase , lowerCamelCase ): if hasattr(lowerCamelCase , "set_processor" ): if not isinstance(lowerCamelCase , lowerCamelCase ): module.set_processor(lowerCamelCase ) else: module.set_processor(processor.pop(F"{name}.processor" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(F"{name}.{sub_name}" , lowerCamelCase , lowerCamelCase ) for name, module in self.named_children(): fn_recursive_attn_processor(lowerCamelCase , lowerCamelCase , lowerCamelCase ) def a__ ( self ): self.set_attn_processor(AttnProcessor() ) @apply_forward_hook def a__ ( self , lowerCamelCase , lowerCamelCase = True ): if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): return self.tiled_encode(lowerCamelCase , return_dict=lowerCamelCase ) if self.use_slicing and x.shape[0] > 1: __a = [self.encoder(lowerCamelCase ) for x_slice in x.split(1 )] __a = torch.cat(lowerCamelCase ) else: __a = self.encoder(lowerCamelCase ) __a = self.quant_conv(lowerCamelCase ) __a = DiagonalGaussianDistribution(lowerCamelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase = True ): if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): return self.tiled_decode(lowerCamelCase , return_dict=lowerCamelCase ) __a = self.post_quant_conv(lowerCamelCase ) __a = self.decoder(lowerCamelCase ) if not return_dict: return (dec,) return DecoderOutput(sample=lowerCamelCase ) @apply_forward_hook def a__ ( self , lowerCamelCase , lowerCamelCase = True ): if self.use_slicing and z.shape[0] > 1: __a = [self._decode(lowerCamelCase ).sample for z_slice in z.split(1 )] __a = torch.cat(lowerCamelCase ) else: __a = self._decode(lowerCamelCase ).sample if not return_dict: return (decoded,) return DecoderOutput(sample=lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): __a = min(a.shape[2] , b.shape[2] , lowerCamelCase ) for y in range(lowerCamelCase ): __a = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): __a = min(a.shape[3] , b.shape[3] , lowerCamelCase ) for x in range(lowerCamelCase ): __a = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def a__ ( self , lowerCamelCase , lowerCamelCase = True ): __a = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) ) __a = int(self.tile_latent_min_size * self.tile_overlap_factor ) __a = self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and encode them separately. 
__a = [] for i in range(0 , x.shape[2] , lowerCamelCase ): __a = [] for j in range(0 , x.shape[3] , lowerCamelCase ): __a = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] __a = self.encoder(lowerCamelCase ) __a = self.quant_conv(lowerCamelCase ) row.append(lowerCamelCase ) rows.append(lowerCamelCase ) __a = [] for i, row in enumerate(lowerCamelCase ): __a = [] for j, tile in enumerate(lowerCamelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __a = self.blend_v(rows[i - 1][j] , lowerCamelCase , lowerCamelCase ) if j > 0: __a = self.blend_h(row[j - 1] , lowerCamelCase , lowerCamelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(lowerCamelCase , dim=3 ) ) __a = torch.cat(lowerCamelCase , dim=2 ) __a = DiagonalGaussianDistribution(lowerCamelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase = True ): __a = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) ) __a = int(self.tile_sample_min_size * self.tile_overlap_factor ) __a = self.tile_sample_min_size - blend_extent # Split z into overlapping 64x64 tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. __a = [] for i in range(0 , z.shape[2] , lowerCamelCase ): __a = [] for j in range(0 , z.shape[3] , lowerCamelCase ): __a = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] __a = self.post_quant_conv(lowerCamelCase ) __a = self.decoder(lowerCamelCase ) row.append(lowerCamelCase ) rows.append(lowerCamelCase ) __a = [] for i, row in enumerate(lowerCamelCase ): __a = [] for j, tile in enumerate(lowerCamelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __a = self.blend_v(rows[i - 1][j] , lowerCamelCase , lowerCamelCase ) if j > 0: __a = self.blend_h(row[j - 1] , lowerCamelCase , lowerCamelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(lowerCamelCase , dim=3 ) ) __a = torch.cat(lowerCamelCase , dim=2 ) if not return_dict: return (dec,) return DecoderOutput(sample=lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = True , lowerCamelCase = None , ): __a = sample __a = self.encode(lowerCamelCase ).latent_dist if sample_posterior: __a = posterior.sample(generator=lowerCamelCase ) else: __a = posterior.mode() __a = self.decode(lowerCamelCase ).sample if not return_dict: return (dec,) return DecoderOutput(sample=lowerCamelCase )
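# Standalone sketch of the linear seam blending performed by the blend_v /
# blend_h helpers above; tensor shapes and values are invented.
import torch

blend_extent = 4
a = torch.zeros(1, 1, 8, 8)              # tile above the seam
b = torch.ones(1, 1, 8, 8)               # current tile
for y in range(blend_extent):
    w = y / blend_extent
    b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - w) + b[:, :, y, :] * w
# rows 0..3 of b now ramp 0.00, 0.25, 0.50, 0.75, hiding the tile seam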
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __lowercase : Tuple = logging.get_logger(__name__) __lowercase : List[Any] = torch.device('cpu') def lowerCamelCase (): __a : int = 'http://images.cocodataset.org/val2017/000000039769.jpg' __a : Tuple = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] ): if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0, 8.8_6_8_5e-0_1, 2.4_3_6_0e-0_1] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9_6_3_6e-0_1, 2.3_4_7_8e-0_1, -1.6_9_6_3e0_0, -1.7_3_8_1e0_0, -8.6_3_3_7e-0_1] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2_7_6_8e-0_1, -4.7_4_2_9e-0_1, -1.0_8_9_7e0_0, -1.0_2_4_8e0_0, 3.5_5_2_3e-0_2] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5_3_3_0e-0_1, 2.4_2_1_1e-0_1, -6.0_1_8_5e-0_1, -8.2_7_8_9e-0_1, -6.0_4_4_6e-0_2] ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Union[str, Any] ): __a : int = dct.pop(_SCREAMING_SNAKE_CASE ) __a : Tuple = val def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): __a : Dict = [] for k in state_dict.keys(): __a : List[Any] = k if ".pwconv" in k: __a : List[Any] = k_new.replace('.pwconv' , '.point_wise_conv' ) if ".dwconv" in k: __a : Dict = k_new.replace('.dwconv' , '.depth_wise_conv' ) if ".Proj." in k: __a : Optional[int] = k_new.replace('.Proj.' , '.proj.' ) if "patch_embed" in k_new: __a : List[Any] = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' ) if "network" in k_new: __a : Union[str, Any] = k_new.split('.' ) if ls[2].isdigit(): __a : Union[str, Any] = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' 
+ '.'.join(ls[3:] ) else: __a : Union[str, Any] = k_new.replace('network' , 'swiftformer.encoder.network' ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] ): __a : Union[str, Any] = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size __a : List[str] = 1_000 __a : Tuple = 'huggingface/label-files' __a : str = 'imagenet-1k-id2label.json' __a : Dict = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) __a : Optional[int] = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} __a : Any = idalabel __a : str = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": __a : Dict = [3, 3, 6, 4] __a : int = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": __a : Dict = [3, 3, 9, 6] __a : List[str] = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": __a : Dict = [4, 3, 10, 5] __a : Optional[int] = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": __a : Tuple = [4, 4, 12, 6] __a : Dict = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith('https' ): __a : List[Any] = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location='cpu' , check_hash=_SCREAMING_SNAKE_CASE ) else: __a : Union[str, Any] = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' ) __a : Optional[Any] = checkpoint __a : Dict = create_rename_keys(_SCREAMING_SNAKE_CASE ) for rename_key_src, rename_key_dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # load HuggingFace model __a : Tuple = SwiftFormerForImageClassification(_SCREAMING_SNAKE_CASE ).eval() hf_model.load_state_dict(_SCREAMING_SNAKE_CASE ) # prepare test inputs __a : Tuple = prepare_img() __a : str = ViTImageProcessor.from_pretrained('preprocessor_config' ) __a : Tuple = processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # compare outputs from both models __a : List[Any] = get_expected_output(_SCREAMING_SNAKE_CASE ) __a : Dict = hf_model(inputs['pixel_values'] ).logits assert hf_logits.shape == torch.Size([1, 1_000] ) assert torch.allclose(hf_logits[0, 0:5] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" ) hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __lowercase : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--swiftformer_name', default='swiftformer_xs', choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'], type=str, help='Name of the SwiftFormer model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default='./converted_outputs/', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.') __lowercase : Tuple = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
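# Hypothetical invocation of the conversion script above (the file name is
# assumed; the arguments are the ones wired up by the argparse block):
#
#   python convert_swiftformer_checkpoint.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt /path/to/swiftformer_xs.pth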
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy SCREAMING_SNAKE_CASE__:List[Any] = logging.get_logger(__name__) class snake_case__ ( snake_case_ ): def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ): __a = feature_size __a = sampling_rate __a = padding_value __a = kwargs.pop("padding_side" , "right" ) __a = kwargs.pop("return_attention_mask" , lowerCamelCase ) super().__init__(**lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , ): # If we have a list of dicts, let's convert it in a dict of lists # We do this to allow using this method as a collate_fn function in PyTorch Dataloader if isinstance(lowerCamelCase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ): __a = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`" F" to this method that includes {self.model_input_names[0]}, but you provided" F" {list(processed_features.keys() )}" ) __a = processed_features[self.model_input_names[0]] __a = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(lowerCamelCase ) == 0: if return_attention_mask: __a = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch __a = required_input[0] if isinstance(lowerCamelCase , (list, tuple) ): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. __a = 0 while len(required_input[index] ) == 0: index += 1 if index < len(lowerCamelCase ): __a = required_input[index][0] if return_tensors is None: if is_tf_tensor(lowerCamelCase ): __a = "tf" elif is_torch_tensor(lowerCamelCase ): __a = "pt" elif isinstance(lowerCamelCase , (int, float, list, tuple, np.ndarray) ): __a = "np" else: raise ValueError( F"type of {first_element} unknown: {type(lowerCamelCase )}. " "Should be one of a python, numpy, pytorch or tensorflow object." ) for key, value in processed_features.items(): if isinstance(value[0] , (int, float) ): __a = to_numpy(lowerCamelCase ) else: __a = [to_numpy(lowerCamelCase ) for v in value] # Convert padding_strategy in PaddingStrategy __a = self._get_padding_strategies(padding=lowerCamelCase , max_length=lowerCamelCase ) __a = processed_features[self.model_input_names[0]] __a = len(lowerCamelCase ) if not all(len(lowerCamelCase ) == batch_size for v in processed_features.values() ): raise ValueError("Some items in the output dictionary have a different batch size than others." 
) __a = [] for i in range(lowerCamelCase ): __a = {k: v[i] for k, v in processed_features.items()} # truncation __a = self._truncate( lowerCamelCase , max_length=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , truncation=lowerCamelCase , ) truncated_inputs.append(lowerCamelCase ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length __a = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) __a = PaddingStrategy.MAX_LENGTH __a = {} for i in range(lowerCamelCase ): # padding __a = self._pad( truncated_inputs[i] , max_length=lowerCamelCase , padding_strategy=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , ) for key, value in outputs.items(): if key not in batch_outputs: __a = [] if value.dtype is np.dtype(np.floataa ): __a = value.astype(np.floataa ) batch_outputs[key].append(lowerCamelCase ) return BatchFeature(lowerCamelCase , tensor_type=lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = PaddingStrategy.DO_NOT_PAD , lowerCamelCase = None , lowerCamelCase = None , ): __a = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: __a = len(lowerCamelCase ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): __a = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of __a = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowerCamelCase ) < max_length if return_attention_mask and "attention_mask" not in processed_features: __a = np.ones(len(lowerCamelCase ) , dtype=np.intaa ) if needs_to_be_padded: __a = max_length - len(lowerCamelCase ) if self.padding_side == "right": if return_attention_mask: __a = np.pad( processed_features["attention_mask"] , (0, difference) ) __a = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) __a = np.pad( lowerCamelCase , lowerCamelCase , "constant" , constant_values=self.padding_value ) elif self.padding_side == "left": if return_attention_mask: __a = np.pad( processed_features["attention_mask"] , (difference, 0) ) __a = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) __a = np.pad( lowerCamelCase , lowerCamelCase , "constant" , constant_values=self.padding_value ) else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return processed_features def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , ): if not truncation: return processed_features elif truncation and max_length is None: raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." 
) __a = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): __a = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of __a = len(lowerCamelCase ) > max_length if needs_to_be_truncated: __a = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: __a = processed_features["attention_mask"][:max_length] return processed_features def a__ ( self , lowerCamelCase=False , lowerCamelCase=None ): # Get padding strategy if padding is not False: if padding is True: __a = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(lowerCamelCase , lowerCamelCase ): __a = PaddingStrategy(lowerCamelCase ) elif isinstance(lowerCamelCase , lowerCamelCase ): __a = padding else: __a = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( F"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined" ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use" " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." ) return padding_strategy
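# Minimal numpy re-enactment (values invented) of the right-padding branch
# above for a 1-D feature sequence: pad with padding_value and extend the
# attention mask with zeros.
import numpy as np

seq = np.array([0.1, 0.2, 0.3])
max_length, padding_value = 5, 0.0
attention_mask = np.ones(len(seq), dtype=np.int32)
difference = max_length - len(seq)
attention_mask = np.pad(attention_mask, (0, difference))
padded = np.pad(seq, (0, difference), "constant", constant_values=padding_value)
print(padded, attention_mask)            # [0.1 0.2 0.3 0.  0. ] [1 1 1 0 0]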
'''simple docstring''' import argparse import torch from transformers import ( EncodecConfig, EncodecFeatureExtractor, EncodecModel, logging, ) # checkpoints downloaded from: # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th logging.set_verbosity_info() _lowerCamelCase : Any = logging.get_logger("transformers.models.encodec") _lowerCamelCase : int = { "quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited", "quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size", "quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed", "quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg", } _lowerCamelCase : Optional[int] = { "encoder.model.0.conv.conv": "encoder.layers.0.conv", "encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv", "encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv", "encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv", "encoder.model.3.conv.conv": "encoder.layers.3.conv", "encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv", "encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv", "encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv", "encoder.model.6.conv.conv": "encoder.layers.6.conv", "encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv", "encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv", "encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv", "encoder.model.9.conv.conv": "encoder.layers.9.conv", "encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv", "encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv", "encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv", "encoder.model.12.conv.conv": "encoder.layers.12.conv", "encoder.model.13.lstm": "encoder.layers.13.lstm", "encoder.model.15.conv.conv": "encoder.layers.15.conv", } _lowerCamelCase : Optional[Any] = { "encoder.model.0.conv.norm": "encoder.layers.0.norm", "encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm", "encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm", "encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm", "encoder.model.3.conv.norm": "encoder.layers.3.norm", "encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm", "encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm", "encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm", "encoder.model.6.conv.norm": "encoder.layers.6.norm", "encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm", "encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm", "encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm", "encoder.model.9.conv.norm": "encoder.layers.9.norm", "encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm", "encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm", "encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm", "encoder.model.12.conv.norm": "encoder.layers.12.norm", "encoder.model.15.conv.norm": "encoder.layers.15.norm", } _lowerCamelCase : int = { "decoder.model.0.conv.conv": "decoder.layers.0.conv", "decoder.model.1.lstm": "decoder.layers.1.lstm", "decoder.model.3.convtr.convtr": "decoder.layers.3.conv", 
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv", "decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv", "decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv", "decoder.model.6.convtr.convtr": "decoder.layers.6.conv", "decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv", "decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv", "decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv", "decoder.model.9.convtr.convtr": "decoder.layers.9.conv", "decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv", "decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv", "decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv", "decoder.model.12.convtr.convtr": "decoder.layers.12.conv", "decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv", "decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv", "decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv", "decoder.model.15.conv.conv": "decoder.layers.15.conv", } _lowerCamelCase : Union[str, Any] = { "decoder.model.0.conv.norm": "decoder.layers.0.norm", "decoder.model.3.convtr.norm": "decoder.layers.3.norm", "decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm", "decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm", "decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm", "decoder.model.6.convtr.norm": "decoder.layers.6.norm", "decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm", "decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm", "decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm", "decoder.model.9.convtr.norm": "decoder.layers.9.norm", "decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm", "decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm", "decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm", "decoder.model.12.convtr.norm": "decoder.layers.12.norm", "decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm", "decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm", "decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm", "decoder.model.15.conv.norm": "decoder.layers.15.norm", } _lowerCamelCase : Optional[int] = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_DECODER, } _lowerCamelCase : Optional[Any] = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_ENCODER_48K, **MAPPING_DECODER, **MAPPING_DECODER_48K, } _lowerCamelCase : Optional[int] = [] _lowerCamelCase : Union[str, Any] = [] def __lowerCamelCase ( A__ , A__ , A__ , A__ , A__ ) -> List[str]: """simple docstring""" for attribute in key.split('.' ): UpperCamelCase = getattr(A__ , A__ ) if weight_type is not None: UpperCamelCase = getattr(A__ , A__ ).shape else: UpperCamelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": UpperCamelCase = value elif weight_type == "weight_g": UpperCamelCase = value elif weight_type == "weight_v": UpperCamelCase = value elif weight_type == "bias": UpperCamelCase = value elif weight_type == "running_mean": UpperCamelCase = value elif weight_type == "running_var": UpperCamelCase = value elif weight_type == "num_batches_tracked": UpperCamelCase = value elif weight_type == "weight_ih_l0": UpperCamelCase = value elif weight_type == "weight_hh_l0": UpperCamelCase = value elif weight_type == "bias_ih_l0": UpperCamelCase = value elif weight_type == "bias_hh_l0": UpperCamelCase = value elif weight_type == "weight_ih_l1": UpperCamelCase = value elif weight_type == "weight_hh_l1": UpperCamelCase = value elif weight_type == "bias_ih_l1": UpperCamelCase = value elif weight_type == "bias_hh_l1": UpperCamelCase = value else: UpperCamelCase = value logger.info(F"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" ) def __lowerCamelCase ( A__ , A__ ) -> List[Any]: """simple docstring""" for key in ignore_keys: if key.endswith('.*' ): if name.startswith(key[:-1] ): return True elif ".*." in key: UpperCamelCase , UpperCamelCase = key.split('.*.' ) if prefix in name and suffix in name: return True elif key in name: return True return False def __lowerCamelCase ( A__ , A__ , A__ ) -> int: """simple docstring""" UpperCamelCase = [] if model_name in ["encodec_24khz", "encodec_32khz"]: UpperCamelCase = MAPPING_24K elif model_name == "encodec_48khz": UpperCamelCase = MAPPING_48K else: raise ValueError(F"""Unsupported model: {model_name}""" ) for name, value in orig_dict.items(): if should_ignore(A__ , A__ ): logger.info(F"""{name} was ignored""" ) continue UpperCamelCase = False for key, mapped_key in MAPPING.items(): if "*" in key: UpperCamelCase , UpperCamelCase = key.split('.*.' ) if prefix in name and suffix in name: UpperCamelCase = suffix if key in name: # HACK otherwise .embed gets initialized with .embed_avg too if key.endswith('embed' ) and name.endswith('embed_avg' ): continue UpperCamelCase = True if "*" in mapped_key: UpperCamelCase = name.split(A__ )[0].split('.'
)[-2] UpperCamelCase = mapped_key.replace('*' , A__ ) if "weight_g" in name: UpperCamelCase = 'weight_g' elif "weight_v" in name: UpperCamelCase = 'weight_v' elif "weight_ih_l0" in name: UpperCamelCase = 'weight_ih_l0' elif "weight_hh_l0" in name: UpperCamelCase = 'weight_hh_l0' elif "bias_ih_l0" in name: UpperCamelCase = 'bias_ih_l0' elif "bias_hh_l0" in name: UpperCamelCase = 'bias_hh_l0' elif "weight_ih_l1" in name: UpperCamelCase = 'weight_ih_l1' elif "weight_hh_l1" in name: UpperCamelCase = 'weight_hh_l1' elif "bias_ih_l1" in name: UpperCamelCase = 'bias_ih_l1' elif "bias_hh_l1" in name: UpperCamelCase = 'bias_hh_l1' elif "bias" in name: UpperCamelCase = 'bias' elif "weight" in name: UpperCamelCase = 'weight' elif "running_mean" in name: UpperCamelCase = 'running_mean' elif "running_var" in name: UpperCamelCase = 'running_var' elif "num_batches_tracked" in name: UpperCamelCase = 'num_batches_tracked' else: UpperCamelCase = None set_recursively(A__ , A__ , A__ , A__ , A__ ) continue if not is_used: unused_weights.append(A__ ) logger.warning(F"""Unused weights: {unused_weights}""" ) @torch.no_grad() def __lowerCamelCase ( A__ , A__ , A__ , A__=None , A__=None , ) -> Optional[int]: """simple docstring""" if config_path is not None: UpperCamelCase = EncodecConfig.from_pretrained(A__ ) else: UpperCamelCase = EncodecConfig() if model_name == "encodec_24khz": pass # config is already correct elif model_name == "encodec_32khz": UpperCamelCase = [8, 5, 4, 4] UpperCamelCase = [2.2] UpperCamelCase = 64 UpperCamelCase = 32_000 UpperCamelCase = 2_048 UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False elif model_name == "encodec_48khz": UpperCamelCase = [8, 5, 4, 2] UpperCamelCase = [3.0, 6.0, 12.0, 24.0] UpperCamelCase = 48_000 UpperCamelCase = 2 UpperCamelCase = False UpperCamelCase = 'time_group_norm' UpperCamelCase = True UpperCamelCase = 1.0 UpperCamelCase = 0.01 else: raise ValueError(F"""Unknown model name: {model_name}""" ) UpperCamelCase = EncodecModel(A__ ) UpperCamelCase = EncodecFeatureExtractor( feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , ) feature_extractor.save_pretrained(A__ ) UpperCamelCase = torch.load(A__ ) if "best_state" in original_checkpoint: # we might have a training state saved, in which case discard the yaml results and just retain the weights UpperCamelCase = original_checkpoint['best_state'] recursively_load_weights(A__ , A__ , A__ ) model.save_pretrained(A__ ) if repo_id: print('Pushing to the hub...' ) feature_extractor.push_to_hub(A__ ) model.push_to_hub(A__ ) if __name__ == "__main__": _lowerCamelCase : List[str] = argparse.ArgumentParser() parser.add_argument( "--model", default="encodec_24khz", type=str, help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.", ) parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) _lowerCamelCase : Dict = parser.parse_args() convert_checkpoint( args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
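# --- Hypothetical usage (not part of the script above) -------------------------
# The script is normally driven by the argparse block; a programmatic call to the
# same entry point (named `convert_checkpoint` in the __main__ block) might look
# like this, with placeholder paths and a checkpoint downloaded from the URLs in
# the header comment:
#
#     convert_checkpoint(
#         "encodec_24khz",                 # --model
#         "./encodec_24khz-d7cc33bc.th",   # --checkpoint_path
#         "./encodec_24khz_hf",            # --pytorch_dump_folder_path
#     )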
"""simple docstring""" from collections import Counter from timeit import timeit def _lowerCamelCase( a = "" , ): return sum(c % 2 for c in Counter(input_str.replace(" " , "" ).lower() ).values() ) < 2 def _lowerCamelCase( a = "" ): if len(a ) == 0: return True __a = input_str.replace(" " , "" ).lower() # character_freq_dict: Stores the frequency of every character in the input string __a = {} for character in lower_case_input_str: __a = character_freq_dict.get(a , 0 ) + 1 __a = 0 for character_count in character_freq_dict.values(): if character_count % 2: odd_char += 1 if odd_char > 1: return False return True def _lowerCamelCase( a = "" ): print("\nFor string = " , a , ":" ) print( "> can_string_be_rearranged_as_palindrome_counter()" , "\tans =" , can_string_be_rearranged_as_palindrome_counter(a ) , "\ttime =" , timeit( "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , ) print( "> can_string_be_rearranged_as_palindrome()" , "\tans =" , can_string_be_rearranged_as_palindrome(a ) , "\ttime =" , timeit( "z.can_string_be_rearranged_as_palindrome(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__:Tuple = input( """Enter string to determine if it can be rearranged as a palindrome or not: """ ).strip() benchmark(check_str) SCREAMING_SNAKE_CASE__:Dict = can_string_be_rearranged_as_palindrome_counter(check_str) print(F'''{check_str} can {'' if status else 'not '}be rearranged as a palindrome''')
import math from typing import Any, Callable, List, Optional, Tuple, Union import numpy as np import torch from ...models import TaFilmDecoder from ...schedulers import DDPMScheduler from ...utils import is_onnx_available, logging, randn_tensor if is_onnx_available(): from ..onnx_utils import OnnxRuntimeModel from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder __UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name __UpperCAmelCase = 256 class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : Union[str, Any] = ['''melgan'''] def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> None: super().__init__() # From MELGAN UpperCAmelCase_ : Optional[Any] = math.log(1E-5 ) # Matches MelGAN training. UpperCAmelCase_ : Any = 4.0 # Largest value for most examples UpperCAmelCase_ : Optional[int] = 1_2_8 self.register_modules( notes_encoder=_UpperCamelCase , continuous_encoder=_UpperCamelCase , decoder=_UpperCamelCase , scheduler=_UpperCamelCase , melgan=_UpperCamelCase , ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=(-1.0, 1.0) , _UpperCamelCase=False ) -> Optional[Any]: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = output_range if clip: UpperCAmelCase_ : int = torch.clip(_UpperCamelCase , self.min_value , self.max_value ) # Scale to [0, 1]. UpperCAmelCase_ : List[str] = (features - self.min_value) / (self.max_value - self.min_value) # Scale to [min_out, max_out]. return zero_one * (max_out - min_out) + min_out def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=(-1.0, 1.0) , _UpperCamelCase=False ) -> Dict: UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = input_range UpperCAmelCase_ : int = torch.clip(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) if clip else outputs # Scale to [0, 1]. UpperCAmelCase_ : Union[str, Any] = (outputs - min_out) / (max_out - min_out) # Scale to [self.min_value, self.max_value]. 
return zero_one * (self.max_value - self.min_value) + self.min_value def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> int: UpperCAmelCase_ : List[Any] = input_tokens > 0 UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.notes_encoder( encoder_input_tokens=_UpperCamelCase , encoder_inputs_mask=_UpperCamelCase ) UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.continuous_encoder( encoder_inputs=_UpperCamelCase , encoder_inputs_mask=_UpperCamelCase ) return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[Any]: UpperCAmelCase_ : Tuple = noise_time if not torch.is_tensor(_UpperCamelCase ): UpperCAmelCase_ : int = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device ) elif torch.is_tensor(_UpperCamelCase ) and len(timesteps.shape ) == 0: UpperCAmelCase_ : List[str] = timesteps[None].to(input_tokens.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML UpperCAmelCase_ : Optional[int] = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device ) UpperCAmelCase_ : List[str] = self.decoder( encodings_and_masks=_UpperCamelCase , decoder_input_tokens=_UpperCamelCase , decoder_noise_time=_UpperCamelCase ) return logits @torch.no_grad() def __call__( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = 1_0_0 , _UpperCamelCase = True , _UpperCamelCase = "numpy" , _UpperCamelCase = None , _UpperCamelCase = 1 , ) -> Union[AudioPipelineOutput, Tuple]: if (callback_steps is None) or ( callback_steps is not None and (not isinstance(_UpperCamelCase , _UpperCamelCase ) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(_UpperCamelCase )}." ) UpperCAmelCase_ : List[Any] = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa ) UpperCAmelCase_ : Union[str, Any] = np.zeros([1, 0, self.n_dims] , np.floataa ) UpperCAmelCase_ : str = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=_UpperCamelCase , device=self.device ) for i, encoder_input_tokens in enumerate(_UpperCamelCase ): if i == 0: UpperCAmelCase_ : Any = torch.from_numpy(pred_mel[:1].copy() ).to( device=self.device , dtype=self.decoder.dtype ) # The first chunk has no previous context. UpperCAmelCase_ : int = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=_UpperCamelCase , device=self.device ) else: # The full song pipeline does not feed in a context feature, so the mask # will be all 0s after the feature converter. Because we know we're # feeding in a full context chunk from the previous prediction, set it # to all 1s. 
UpperCAmelCase_ : Optional[Any] = ones UpperCAmelCase_ : Union[str, Any] = self.scale_features( _UpperCamelCase , output_range=[-1.0, 1.0] , clip=_UpperCamelCase ) UpperCAmelCase_ : Optional[int] = self.encode( input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=_UpperCamelCase , continuous_mask=_UpperCamelCase , ) # Sample encoder_continuous_inputs shaped gaussian noise to begin loop UpperCAmelCase_ : List[Any] = randn_tensor( shape=encoder_continuous_inputs.shape , generator=_UpperCamelCase , device=self.device , dtype=self.decoder.dtype , ) # set step values self.scheduler.set_timesteps(_UpperCamelCase ) # Denoising diffusion loop for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): UpperCAmelCase_ : str = self.decode( encodings_and_masks=_UpperCamelCase , input_tokens=_UpperCamelCase , noise_time=t / self.scheduler.config.num_train_timesteps , ) # Compute previous output: x_t -> x_t-1 UpperCAmelCase_ : List[str] = self.scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase ).prev_sample UpperCAmelCase_ : Tuple = self.scale_to_features(_UpperCamelCase , input_range=[-1.0, 1.0] ) UpperCAmelCase_ : List[str] = mel[:1] UpperCAmelCase_ : str = mel.cpu().float().numpy() UpperCAmelCase_ : Dict = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 ) # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(_UpperCamelCase , _UpperCamelCase ) logger.info('Generated segment' , _UpperCamelCase ) if output_type == "numpy" and not is_onnx_available(): raise ValueError( 'Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.' ) elif output_type == "numpy" and self.melgan is None: raise ValueError( 'Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.' ) if output_type == "numpy": UpperCAmelCase_ : List[Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa ) ) else: UpperCAmelCase_ : str = full_pred_mel if not return_dict: return (output,) return AudioPipelineOutput(audios=_UpperCamelCase )
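# --- Standalone check of the scaling helpers (illustrative sketch) ---------------
# `scale_features` maps [min_value, max_value] linearly onto an output range and
# `scale_to_features` inverts it; the constants below mirror those set in __init__
# (log(1e-5) and 4.0).
import math
import torch

min_value, max_value = math.log(1e-5), 4.0
min_out, max_out = -1.0, 1.0
features = torch.tensor([min_value, 0.0, max_value])

zero_one = (features - min_value) / (max_value - min_value)
scaled = zero_one * (max_out - min_out) + min_out
restored = (scaled - min_out) / (max_out - min_out) * (max_value - min_value) + min_value
assert torch.allclose(restored, features)  # round trip is exact up to float error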
"""simple docstring""" import itertools import random import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin SCREAMING_SNAKE_CASE__:Any = random.Random() if is_torch_available(): import torch def _lowerCamelCase( a , a=1.0 , a=None , a=None ): if rng is None: __a = global_rng __a = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class snake_case__ ( unittest.TestCase ): def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=400 , lowerCamelCase=2000 , lowerCamelCase=1 , lowerCamelCase=0.0 , lowerCamelCase=16000 , lowerCamelCase=True , lowerCamelCase=True , ): __a = parent __a = batch_size __a = min_seq_length __a = max_seq_length __a = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) __a = feature_size __a = padding_value __a = sampling_rate __a = return_attention_mask __a = do_normalize def a__ ( self ): return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def a__ ( self , lowerCamelCase=False , lowerCamelCase=False ): def _flatten(lowerCamelCase ): return list(itertools.chain(*lowerCamelCase ) ) if equal_length: __a = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size __a = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: __a = [np.asarray(lowerCamelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class snake_case__ ( snake_case_, unittest.TestCase ): _snake_case : str = ASTFeatureExtractor def a__ ( self ): __a = ASTFeatureExtractionTester(self ) def a__ ( self ): # Tests that all call wrap to encode_plus and batch_encode_plus __a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 __a = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __a = [np.asarray(lowerCamelCase ) for speech_input in speech_inputs] # Test not batched input __a = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values __a = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) ) # Test batched __a = feat_extract(lowerCamelCase , padding=lowerCamelCase , return_tensors="np" ).input_values __a = feat_extract(lowerCamelCase , padding=lowerCamelCase , return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ): self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. 
__a = [floats_list((1, x) )[0] for x in (800, 800, 800)] __a = np.asarray(lowerCamelCase ) __a = feat_extract(lowerCamelCase , return_tensors="np" ).input_values __a = feat_extract(lowerCamelCase , return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ): self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) ) @require_torch def a__ ( self ): import torch __a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __a = np.random.rand(100 ).astype(np.floataa ) __a = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: __a = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) __a = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def a__ ( self , lowerCamelCase ): from datasets import load_dataset __a = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" ) # automatic decoding with librispeech __a = ds.sort("id" ).select(range(lowerCamelCase ) )[:num_samples]["audio"] return [x["array"] for x in speech_samples] @require_torch def a__ ( self ): # fmt: off __a = torch.tensor( [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776, -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133, -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936, -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] ) # fmt: on __a = self._load_datasamples(1 ) __a = ASTFeatureExtractor() __a = feature_extractor(lowerCamelCase , return_tensors="pt" ).input_values self.assertEquals(input_values.shape , (1, 1024, 128) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCamelCase , atol=1E-4 ) )
def cocktail_shaker_sort(unsorted: list) -> list:
    '''simple docstring'''
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(f"{cocktail_shaker_sort(unsorted) = }")
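# --- Example runs (illustrative) ---------------------------------------------------
# Two quick checks of the bidirectional bubble pass above:
assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]
assert cocktail_shaker_sort([-4, 5, 0, 1, 2, 11]) == [-4, 0, 1, 2, 5, 11]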
"""simple docstring""" from typing import Optional, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin class snake_case__ ( snake_case_, snake_case_ ): @register_to_config def __init__( self , lowerCamelCase = 768 , ): super().__init__() __a = nn.Parameter(torch.zeros(1 , lowerCamelCase ) ) __a = nn.Parameter(torch.ones(1 , lowerCamelCase ) ) def a__ ( self , lowerCamelCase = None , lowerCamelCase = None , ): __a = nn.Parameter(self.mean.to(lowerCamelCase ).to(lowerCamelCase ) ) __a = nn.Parameter(self.std.to(lowerCamelCase ).to(lowerCamelCase ) ) return self def a__ ( self , lowerCamelCase ): __a = (embeds - self.mean) * 1.0 / self.std return embeds def a__ ( self , lowerCamelCase ): __a = (embeds * self.std) + self.mean return embeds
'''simple docstring''' # Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING, Dict, Optional import numpy as np import pyarrow as pa from .. import config from ..utils.logging import get_logger from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import jax import jaxlib __SCREAMING_SNAKE_CASE : Optional[Any] = get_logger() __SCREAMING_SNAKE_CASE : Optional[dict] = None class lowerCamelCase_ (TensorFormatter[Mapping, "jax.Array", Mapping] ): '''simple docstring''' def __init__( self : Any , A : List[Any]=None , A : List[str]=None , **A : List[str] ): super().__init__(features=A ) import jax from jaxlib.xla_client import Device if isinstance(A , A ): raise ValueError( F"""Expected {device} to be a `str` not {type(A )}, as `jaxlib.xla_extension.Device` """ "is not serializable neither with `pickle` nor with `dill`. Instead you can surround " "the device with `str()` to get its string identifier that will be internally mapped " "to the actual `jaxlib.xla_extension.Device`." ) _UpperCAmelCase : Union[str, Any] = device if isinstance(A , A ) else str(jax.devices()[0] ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: _UpperCAmelCase : Dict = self._map_devices_to_str() if self.device not in list(DEVICE_MAPPING.keys() ): logger.warning( F"""Device with string identifier {self.device} not listed among the available """ F"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """ F"""device: {str(jax.devices()[0] )}.""" ) _UpperCAmelCase : List[Any] = str(jax.devices()[0] ) _UpperCAmelCase : List[str] = jnp_array_kwargs @staticmethod def _A ( ): import jax return {str(A ): device for device in jax.devices()} def _A ( self : Optional[int] , A : int ): import jax import jax.numpy as jnp if isinstance(A , A ) and column: if all( isinstance(A , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return jnp.stack(A , axis=0 ) return column def _A ( self : List[str] , A : Optional[Any] ): import jax import jax.numpy as jnp if isinstance(A , (str, bytes, type(A )) ): return value elif isinstance(A , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ): return value.tolist() _UpperCAmelCase : Optional[Any] = {} if isinstance(A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ): # the default int precision depends on the jax config # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision if jax.config.jax_enable_xaa: _UpperCAmelCase : Optional[Any] = {"dtype": jnp.intaa} else: _UpperCAmelCase : List[Any] = {"dtype": jnp.intaa} elif isinstance(A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ): _UpperCAmelCase : List[str] = {"dtype": jnp.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(A , PIL.Image.Image ): _UpperCAmelCase : Optional[int] = np.asarray(A ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: _UpperCAmelCase : int = self._map_devices_to_str() with jax.default_device(DEVICE_MAPPING[self.device] ): # calling jnp.array on a np.ndarray does copy the data # see 
https://github.com/google/jax/issues/4486 return jnp.array(A , **{**default_dtype, **self.jnp_array_kwargs} ) def _A ( self : Optional[int] , A : Optional[int] ): import jax # support for torch, tf, jax etc. if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if isinstance(A , torch.Tensor ): return self._tensorize(data_struct.detach().cpu().numpy()[()] ) if hasattr(A , "__array__" ) and not isinstance(A , jax.Array ): _UpperCAmelCase : List[Any] = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(A , np.ndarray ): if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(A ) for substruct in data_struct] ) elif isinstance(A , (list, tuple) ): return self._consolidate([self.recursive_tensorize(A ) for substruct in data_struct] ) return self._tensorize(A ) def _A ( self : List[str] , A : dict ): return map_nested(self._recursive_tensorize , A , map_list=A ) def _A ( self : Dict , A : pa.Table ): _UpperCAmelCase : Tuple = self.numpy_arrow_extractor().extract_row(A ) _UpperCAmelCase : Optional[int] = self.python_features_decoder.decode_row(A ) return self.recursive_tensorize(A ) def _A ( self : Optional[Any] , A : pa.Table ): _UpperCAmelCase : Optional[Any] = self.numpy_arrow_extractor().extract_column(A ) _UpperCAmelCase : Any = self.python_features_decoder.decode_column(A , pa_table.column_names[0] ) _UpperCAmelCase : Union[str, Any] = self.recursive_tensorize(A ) _UpperCAmelCase : List[Any] = self._consolidate(A ) return column def _A ( self : List[str] , A : pa.Table ): _UpperCAmelCase : Optional[int] = self.numpy_arrow_extractor().extract_batch(A ) _UpperCAmelCase : Optional[int] = self.python_features_decoder.decode_batch(A ) _UpperCAmelCase : Optional[int] = self.recursive_tensorize(A ) for column_name in batch: _UpperCAmelCase : Any = self._consolidate(batch[column_name] ) return batch
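# --- Illustrative dtype defaulting (standalone sketch) -------------------------------
# The `_tensorize` method above defaults integer arrays to int32 (int64 only when
# 64-bit precision is enabled in the jax config) and floats to float32; the same
# decision in isolation:
import jax
import jax.numpy as jnp
import numpy as np

value = np.arange(3)
default_dtype = {"dtype": jnp.int64} if jax.config.jax_enable_x64 else {"dtype": jnp.int32}
print(jnp.array(value, **default_dtype).dtype)  # int32 unless jax_enable_x64 is set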
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available SCREAMING_SNAKE_CASE__:List[str] = { """configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__:Dict = [ """LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""", """LongT5EncoderModel""", """LongT5ForConditionalGeneration""", """LongT5Model""", """LongT5PreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__:Dict = [ """FlaxLongT5ForConditionalGeneration""", """FlaxLongT5Model""", """FlaxLongT5PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longta import ( LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST, LongTaEncoderModel, LongTaForConditionalGeneration, LongTaModel, LongTaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_longta import ( FlaxLongTaForConditionalGeneration, FlaxLongTaModel, FlaxLongTaPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__:Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class SCREAMING_SNAKE_CASE__ : def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple=1_3 , SCREAMING_SNAKE_CASE__ : str=7 , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=9_9 , SCREAMING_SNAKE_CASE__ : Optional[Any]=3_2 , SCREAMING_SNAKE_CASE__ : List[str]=5 , SCREAMING_SNAKE_CASE__ : List[Any]=4 , SCREAMING_SNAKE_CASE__ : Tuple=3_7 , SCREAMING_SNAKE_CASE__ : Any="gelu" , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : int=5_1_2 , SCREAMING_SNAKE_CASE__ : int=1_6 , SCREAMING_SNAKE_CASE__ : Optional[int]=2 , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE__ : Optional[Any]=4 , SCREAMING_SNAKE_CASE__ : Optional[int]=None , ) -> Any: a_ : Tuple = parent a_ : int = batch_size a_ : Tuple = seq_length a_ : List[Any] = is_training a_ : List[str] = use_token_type_ids a_ : Dict = use_labels a_ : Any = vocab_size a_ : List[str] = hidden_size a_ : Tuple = num_hidden_layers a_ : List[Any] = num_attention_heads a_ : Dict = intermediate_size a_ : Any = hidden_act a_ : List[str] = hidden_dropout_prob a_ : Tuple = attention_probs_dropout_prob a_ : Optional[Any] = max_position_embeddings a_ : List[Any] = type_vocab_size a_ : int = type_sequence_label_size a_ : List[Any] = initializer_range a_ : List[str] = num_labels a_ : Union[str, Any] = num_choices a_ : str = scope a_ : Tuple = self.vocab_size - 1 def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: a_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a_ : Any = None if self.use_token_type_ids: a_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a_ : List[Any] = None a_ : Union[str, Any] = None a_ : List[Any] = None if self.use_labels: a_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a_ : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) a_ : Union[str, Any] = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) a_ : List[str] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , *SCREAMING_SNAKE_CASE__ : Tuple ) -> Union[str, Any]: a_ : Dict = OpenAIGPTModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : str = 
model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , head_mask=SCREAMING_SNAKE_CASE__ ) a_ : Dict = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ ) a_ : Dict = model(SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Any: a_ : str = OpenAIGPTLMHeadModel(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : Optional[int] = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict: a_ : int = OpenAIGPTDoubleHeadsModel(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : str = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : str ) -> List[str]: a_ : Any = self.num_labels a_ : Dict = OpenAIGPTForSequenceClassification(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a_ : Any = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: a_ : Optional[Any] = self.prepare_config_and_inputs() ( ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ) : Optional[Any] = config_and_inputs a_ : Optional[int] = { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask, } return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ): snake_case__ : Tuple = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) snake_case__ : List[str] = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly snake_case__ : Dict = ( { '''feature-extraction''': OpenAIGPTModel, '''text-classification''': OpenAIGPTForSequenceClassification, '''text-generation''': OpenAIGPTLMHeadModel, '''zero-shot''': OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] ) -> Dict: if 
pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def SCREAMING_SNAKE_CASE ( self : int , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any=False ) -> List[str]: a_ : str = super()._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": a_ : Optional[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ , ) a_ : str = inputs_dict['labels'] a_ : Optional[int] = inputs_dict['labels'] a_ : Optional[int] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ , ) a_ : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ ) return inputs_dict def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: a_ : str = OpenAIGPTModelTester(self ) a_ : int = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , n_embd=3_7 ) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple: self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: a_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: a_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]: a_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: a_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*SCREAMING_SNAKE_CASE__ ) @slow def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str: for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a_ : str = OpenAIGPTModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @slow def SCREAMING_SNAKE_CASE ( self : Dict ) -> int: a_ : Dict = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' ) model.to(SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]] , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ ) # the president is a_ : Tuple = [ 4_8_1, 4_7_3_5, 5_4_4, 2_4_6, 9_6_3, 8_7_0, 7_6_2, 2_3_9, 2_4_4, 4_0_4_7_7, 2_4_4, 2_4_9, 7_1_9, 8_8_1, 4_8_7, 5_4_4, 2_4_0, 2_4_4, 6_0_3, 4_8_1, ] # the president is a very good man. " \n " i\'m sure he is, " said the a_ : Dict = model.generate(SCREAMING_SNAKE_CASE__ , do_sample=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(output_ids[0].tolist() , SCREAMING_SNAKE_CASE__ )
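# --- Equivalent generation call outside the test harness (illustrative) ----------------
# The integration test above checks greedy decoding from the prompt
# "the president is"; a standalone reproduction (downloads the checkpoint):
from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer

tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
input_ids = tokenizer("the president is", return_tensors="pt").input_ids
output_ids = model.generate(input_ids, do_sample=False)  # greedy, as in the test
print(tokenizer.decode(output_ids[0]))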
"""simple docstring""" import argparse import collections import torch from flax import traverse_util from tax import checkpoints from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def _lowerCamelCase( a , a , a , a="attention" ): __a = params[F"{prefix}/layers_{i}/{layer_name}/key/kernel"] __a = params[F"{prefix}/layers_{i}/{layer_name}/out/kernel"] __a = params[F"{prefix}/layers_{i}/{layer_name}/query/kernel"] __a = params[F"{prefix}/layers_{i}/{layer_name}/value/kernel"] return k, o, q, v def _lowerCamelCase( a , a , a , a=False ): if split_mlp_wi: __a = params[F"{prefix}/layers_{i}/mlp/wi_0/kernel"] __a = params[F"{prefix}/layers_{i}/mlp/wi_1/kernel"] __a = (wi_a, wi_a) else: __a = params[F"{prefix}/layers_{i}/mlp/wi/kernel"] __a = params[F"{prefix}/layers_{i}/mlp/wo/kernel"] return wi, wo def _lowerCamelCase( a , a , a , a ): return params[F"{prefix}/layers_{i}/{layer_name}/scale"] def _lowerCamelCase( a , *, a , a ): __a = traverse_util.flatten_dict(variables["target"] ) __a = {"/".join(a ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi __a = "encoder/layers_0/mlp/wi_0/kernel" in old print("Split MLP:" , a ) __a = collections.OrderedDict() # Shared embeddings. __a = old["token_embedder/embedding"] # Encoder. for i in range(a ): # Block i, layer 0 (Self Attention). __a = tax_layer_norm_lookup(a , a , "encoder" , "pre_attention_layer_norm" ) __a , __a , __a , __a = tax_attention_lookup(a , a , "encoder" , "attention" ) __a = layer_norm __a = k.T __a = o.T __a = q.T __a = v.T # Block i, layer 1 (MLP). __a = tax_layer_norm_lookup(a , a , "encoder" , "pre_mlp_layer_norm" ) __a , __a = tax_mlp_lookup(a , a , "encoder" , a ) __a = layer_norm if split_mlp_wi: __a = wi[0].T __a = wi[1].T else: __a = wi.T __a = wo.T __a = old[ "encoder/relpos_bias/rel_embedding" ].T __a = old["encoder/encoder_norm/scale"] if not is_encoder_only: # Decoder. for i in range(a ): # Block i, layer 0 (Self Attention). __a = tax_layer_norm_lookup(a , a , "decoder" , "pre_self_attention_layer_norm" ) __a , __a , __a , __a = tax_attention_lookup(a , a , "decoder" , "self_attention" ) __a = layer_norm __a = k.T __a = o.T __a = q.T __a = v.T # Block i, layer 1 (Cross Attention). __a = tax_layer_norm_lookup(a , a , "decoder" , "pre_cross_attention_layer_norm" ) __a , __a , __a , __a = tax_attention_lookup(a , a , "decoder" , "encoder_decoder_attention" ) __a = layer_norm __a = k.T __a = o.T __a = q.T __a = v.T # Block i, layer 2 (MLP). __a = tax_layer_norm_lookup(a , a , "decoder" , "pre_mlp_layer_norm" ) __a , __a = tax_mlp_lookup(a , a , "decoder" , a ) __a = layer_norm if split_mlp_wi: __a = wi[0].T __a = wi[1].T else: __a = wi.T __a = wo.T __a = old["decoder/decoder_norm/scale"] __a = old[ "decoder/relpos_bias/rel_embedding" ].T # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: __a = old["decoder/logits_dense/kernel"].T return new def _lowerCamelCase( a , a ): __a = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: __a = state_dict["shared.weight"] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: __a = state_dict["shared.weight"] if "lm_head.weight" not in state_dict: # For old 1.0 models. print("Using shared word embeddings as lm_head." 
) __a = state_dict["shared.weight"] return state_dict def _lowerCamelCase( a , a , a , a ): __a = checkpoints.load_tax_checkpoint(a ) __a = convert_tax_to_pytorch(a , num_layers=config.num_layers , is_encoder_only=a ) __a = make_state_dict(a , a ) model.load_state_dict(a , strict=a ) def _lowerCamelCase( a , a , a , a = False ): __a = TaConfig.from_json_file(a ) print(F"Building PyTorch model from configuration: {config}" ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: __a = TaEncoderModel(a ) else: __a = TaForConditionalGeneration(a ) # Load weights from tf checkpoint load_tax_weights_in_ta(a , a , a , a ) # Save pytorch-model print(F"Save PyTorch model to {pytorch_dump_path}" ) model.save_pretrained(a ) # Verify that we can load the checkpoint. model.from_pretrained(a ) print("Done" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__:Tuple = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""") # Required parameters parser.add_argument( """--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False ) SCREAMING_SNAKE_CASE__:Tuple = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only )
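# --- Hypothetical invocation (paths are placeholders) -----------------------------------
# Mirroring the argparse-driven __main__ block above:
#
#     convert_tax_checkpoint_to_pytorch(
#         "path/to/t5x_checkpoint",   # --t5x_checkpoint_path
#         "path/to/config.json",      # --config_file
#         "path/to/pytorch_dump",     # --pytorch_dump_path
#         False,                      # --is_encoder_only
#     )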
"""simple docstring""" from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class _UpperCAmelCase ( _A ): @slow @require_torch def A ( self : Tuple ) -> List[str]: lowercase_ : int = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' ) lowercase_ : Any = BertTokenizer.from_pretrained('''bert-base-uncased''' ) lowercase_ : str = bertabert.config.encoder.vocab_size lowercase_ : Optional[Any] = tokenizer.sep_token_id lowercase_ : Optional[int] = tokenizer.cls_token_id lowercase_ : Any = 1_28 lowercase_ : Any = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' ) lowercase_ : Tuple = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' ) lowercase_ : Tuple = train_dataset.select(range(32 ) ) lowercase_ : List[str] = val_dataset.select(range(16 ) ) lowercase_ : Dict = 4 def _map_to_encoder_decoder_inputs(A : Any ): # Tokenizer will automatically set [BOS] <text> [EOS] lowercase_ : List[str] = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=A , max_length=5_12 ) lowercase_ : Optional[int] = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=A , max_length=1_28 ) lowercase_ : Optional[int] = inputs.input_ids lowercase_ : Optional[Any] = inputs.attention_mask lowercase_ : Optional[Any] = outputs.input_ids lowercase_ : Optional[int] = outputs.input_ids.copy() lowercase_ : Any = [ [-1_00 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels'''] ] lowercase_ : int = outputs.attention_mask assert all(len(A ) == 5_12 for x in inputs.input_ids ) assert all(len(A ) == 1_28 for x in outputs.input_ids ) return batch def _compute_metrics(A : Union[str, Any] ): lowercase_ : Optional[int] = pred.label_ids lowercase_ : Dict = pred.predictions # all unnecessary tokens are removed lowercase_ : List[Any] = tokenizer.batch_decode(A , skip_special_tokens=A ) lowercase_ : Dict = tokenizer.batch_decode(A , skip_special_tokens=A ) lowercase_ : Union[str, Any] = sum([int(pred_str[i] == label_str[i] ) for i in range(len(A ) )] ) / len(A ) return {"accuracy": accuracy} # map train dataset lowercase_ : Optional[int] = train_dataset.map( _map_to_encoder_decoder_inputs , batched=A , batch_size=A , remove_columns=['''article''', '''highlights'''] , ) train_dataset.set_format( type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , ) # same for validation dataset lowercase_ : str = val_dataset.map( _map_to_encoder_decoder_inputs , batched=A , batch_size=A , remove_columns=['''article''', '''highlights'''] , ) val_dataset.set_format( type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , ) lowercase_ : Optional[Any] = self.get_auto_remove_tmp_dir() lowercase_ : Union[str, Any] = SeqaSeqTrainingArguments( output_dir=A , per_device_train_batch_size=A , per_device_eval_batch_size=A , predict_with_generate=A , evaluation_strategy='''steps''' , do_train=A , do_eval=A , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer lowercase_ : Any = SeqaSeqTrainer( model=A , args=A , compute_metrics=_compute_metrics , train_dataset=A , 
eval_dataset=A , tokenizer=A , ) # start training trainer.train()
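# --- Standalone check of the label-masking rule (illustrative) ---------------------------
# `_map_to_encoder_decoder_inputs` above replaces pad-token positions in the labels
# with -100 so the cross-entropy loss ignores them; the rule in isolation:
pad_token_id = 0
labels = [[5, 7, pad_token_id, pad_token_id]]
masked = [[-100 if token == pad_token_id else token for token in seq] for seq in labels]
assert masked == [[5, 7, -100, -100]]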
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ): _snake_case : str = StableUnCLIPImgaImgPipeline _snake_case : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS _snake_case : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS _snake_case : Optional[Any] = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess _snake_case : List[Any] = frozenset([] ) def a__ ( self ): __a = 32 __a = embedder_hidden_size # image encoding components __a = CLIPImageProcessor(crop_size=32 , size=32 ) torch.manual_seed(0 ) __a = CLIPVisionModelWithProjection( CLIPVisionConfig( hidden_size=lowerCamelCase , projection_dim=lowerCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) ) # regular denoising components torch.manual_seed(0 ) __a = StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase ) __a = DDPMScheduler(beta_schedule="squaredcos_cap_v2" ) torch.manual_seed(0 ) __a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) __a = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) __a = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCamelCase , layers_per_block=1 , upcast_attention=lowerCamelCase , use_linear_projection=lowerCamelCase , ) torch.manual_seed(0 ) __a = DDIMScheduler( beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=lowerCamelCase , steps_offset=1 , ) torch.manual_seed(0 ) __a = AutoencoderKL() __a = { # image encoding components "feature_extractor": feature_extractor, "image_encoder": image_encoder.eval(), # image noising components "image_normalizer": image_normalizer.eval(), "image_noising_scheduler": image_noising_scheduler, # regular denoising components "tokenizer": tokenizer, "text_encoder": text_encoder.eval(), "unet": unet.eval(), "scheduler": scheduler, "vae": vae.eval(), } return 
components def a__ ( self , lowerCamelCase , lowerCamelCase=0 , lowerCamelCase=True ): if str(lowerCamelCase ).startswith("mps" ): __a = torch.manual_seed(lowerCamelCase ) else: __a = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase ) __a = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase ) if pil_image: __a = input_image * 0.5 + 0.5 __a = input_image.clamp(0 , 1 ) __a = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() __a = DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0] return { "prompt": "An anime racoon running a marathon", "image": input_image, "generator": generator, "num_inference_steps": 2, "output_type": "np", } @skip_mps def a__ ( self ): __a = "cpu" # ensure determinism for the device-dependent torch.Generator __a = self.get_dummy_components() __a = StableUnCLIPImgaImgPipeline(**lowerCamelCase ) __a = sd_pipe.to(lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase ) __a = self.get_dummy_inputs(lowerCamelCase ) inputs.update({"image_embeds": None} ) __a = sd_pipe(**lowerCamelCase ).images __a = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __a = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def a__ ( self ): __a = torch_device in ["cpu", "mps"] self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase ) def a__ ( self ): __a = torch_device in ["cpu", "mps"] self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def a__ ( self ): self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase ) @slow @require_torch_gpu class snake_case__ ( unittest.TestCase ): def a__ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self ): __a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) __a = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" ) __a = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __a = torch.Generator(device="cpu" ).manual_seed(0 ) __a = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" ) __a = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase ) def a__ ( self ): __a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) __a = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" ) __a = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) # stable unclip will oom when integration tests are run on a 
V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __a = torch.Generator(device="cpu" ).manual_seed(0 ) __a = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" ) __a = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase ) def a__ ( self ): __a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __a = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa ) __a = pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __a = pipe( lowerCamelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , ) __a = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
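For orientation: `StableUnCLIPImgaImgPipeline` in the cell above is diffusers' `StableUnCLIPImg2ImgPipeline` with the digit mangled by the obfuscation. A minimal sketch of the equivalent public-API call; the checkpoint name is an assumption (any released Stable unCLIP img2img weights would do), and a CUDA device is assumed:

# Sketch: the same image-variation pipeline used outside the test harness.
# Checkpoint name is an assumption; requires a GPU for the fp16 path shown here.
import torch
from diffusers import StableUnCLIPImg2ImgPipeline
from diffusers.utils import load_image

pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1-unclip", torch_dtype=torch.float16
).to("cuda")
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
)
image = pipe(init_image, "anime turtle", num_inference_steps=20).images[0]
image.save("turtle_variation.png")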
261
0
"""Bi-directional Dijkstra: search forward from the source and backward from the
destination at the same time, stopping once the two frontiers meet."""
from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
):
    # Relax every edge leaving `v` in the current search direction.
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        # If the opposite search already settled `nxt`, the frontiers meet here.
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict):
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
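A quick usage check of the reconstruction above; the expected cost is easy to verify by hand against the two example graphs:

print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))
# 3: path E -> G -> F costs 2 + 1, beating E -> B -> C -> D -> F (cost 4)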
34
"""simple docstring""" import random def _lowerCamelCase( a , a , a ): __a = a[left_index] __a = left_index + 1 for j in range(left_index + 1 , a ): if a[j] < pivot: __a , __a = a[i], a[j] i += 1 __a , __a = a[i - 1], a[left_index] return i - 1 def _lowerCamelCase( a , a , a ): if left < right: __a = random.randint(a , right - 1 ) __a , __a = ( a[left], a[pivot], ) # switches the pivot with the left most bound __a = partition(a , a , a ) quick_sort_random( a , a , a ) # recursive quicksort to the left of the pivot point quick_sort_random( a , pivot_index + 1 , a ) # recursive quicksort to the right of the pivot point def _lowerCamelCase( ): __a = input("Enter numbers separated by a comma:\n" ).strip() __a = [int(a ) for item in user_input.split("," )] quick_sort_random(a , 0 , len(a ) ) print(a ) if __name__ == "__main__": main()
261
0
'''simple docstring''' from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCAmelCase_ : """simple docstring""" def __init__( self : int , snake_case_ : Tuple , snake_case_ : List[str]=3 , snake_case_ : Tuple=32 , snake_case_ : List[Any]=3 , snake_case_ : List[str]=10 , snake_case_ : List[str]=[10, 20, 30, 40] , snake_case_ : Tuple=[1, 1, 2, 1] , snake_case_ : Tuple=True , snake_case_ : str=True , snake_case_ : int="relu" , snake_case_ : List[Any]=3 , snake_case_ : str=None , ): snake_case__ : List[Any] = parent snake_case__ : List[Any] = batch_size snake_case__ : int = image_size snake_case__ : List[Any] = num_channels snake_case__ : Optional[Any] = embeddings_size snake_case__ : Optional[int] = hidden_sizes snake_case__ : Tuple = depths snake_case__ : Any = is_training snake_case__ : Optional[int] = use_labels snake_case__ : Optional[int] = hidden_act snake_case__ : Optional[int] = num_labels snake_case__ : int = scope snake_case__ : Tuple = len(snake_case_ ) def lowerCamelCase ( self : Any ): snake_case__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case__ : Union[str, Any] = None if self.use_labels: snake_case__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels ) snake_case__ : List[str] = self.get_config() return config, pixel_values, labels def lowerCamelCase ( self : int ): return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def lowerCamelCase ( self : Tuple , snake_case_ : Tuple , snake_case_ : List[Any] , snake_case_ : Optional[int] ): snake_case__ : Optional[Any] = TFResNetModel(config=snake_case_ ) snake_case__ : int = model(snake_case_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCamelCase ( self : List[str] , snake_case_ : List[str] , snake_case_ : str , snake_case_ : Union[str, Any] ): snake_case__ : str = self.num_labels snake_case__ : Optional[int] = TFResNetForImageClassification(snake_case_ ) snake_case__ : Tuple = model(snake_case_ , labels=snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase ( self : Tuple ): snake_case__ : List[Any] = self.prepare_config_and_inputs() snake_case__ , snake_case__ , snake_case__ : str = config_and_inputs snake_case__ : int = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class UpperCAmelCase_ ( _a , _a , unittest.TestCase ): """simple docstring""" lowercase = (TFResNetModel, 
TFResNetForImageClassification) if is_tf_available() else () lowercase = ( {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification} if is_tf_available() else {} ) lowercase = False lowercase = False lowercase = False lowercase = False lowercase = False def lowerCamelCase ( self : Optional[int] ): snake_case__ : Tuple = TFResNetModelTester(self ) snake_case__ : List[str] = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ ) def lowerCamelCase ( self : Dict ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase ( self : str ): return @unittest.skip(reason="""ResNet does not use inputs_embeds""" ) def lowerCamelCase ( self : int ): pass @unittest.skip(reason="""ResNet does not support input and output embeddings""" ) def lowerCamelCase ( self : List[Any] ): pass def lowerCamelCase ( self : List[Any] ): snake_case__ , snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : Dict = model_class(snake_case_ ) snake_case__ : Optional[Any] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case__ : Union[str, Any] = [*signature.parameters.keys()] snake_case__ : Optional[int] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , snake_case_ ) def lowerCamelCase ( self : Union[str, Any] ): snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case_ ) def lowerCamelCase ( self : List[str] ): def check_hidden_states_output(snake_case_ : Any , snake_case_ : Any , snake_case_ : List[str] ): snake_case__ : List[Any] = model_class(snake_case_ ) snake_case__ : Dict = model(**self._prepare_for_class(snake_case_ , snake_case_ ) ) snake_case__ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states snake_case__ : List[Any] = self.model_tester.num_stages self.assertEqual(len(snake_case_ ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) snake_case__ , snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ : List[Any] = ["""basic""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: snake_case__ : Dict = layer_type snake_case__ : Optional[int] = True check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case__ : List[Any] = True check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ) def lowerCamelCase ( self : Optional[Any] ): snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case_ ) @slow def lowerCamelCase ( self : Optional[Any] ): for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case__ : str = 
TFResNetModel.from_pretrained(snake_case_ ) self.assertIsNotNone(snake_case_ ) def __snake_case( ) -> Optional[int]: snake_case__ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" @cached_property def lowerCamelCase ( self : List[Any] ): return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowerCamelCase ( self : Optional[int] ): snake_case__ : List[str] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) snake_case__ : List[Any] = self.default_image_processor snake_case__ : List[Any] = prepare_img() snake_case__ : List[str] = image_processor(images=snake_case_ , return_tensors="""tf""" ) # forward pass snake_case__ : Optional[Any] = model(**snake_case_ ) # verify the logits snake_case__ : Union[str, Any] = tf.TensorShape((1, 1_000) ) self.assertEqual(outputs.logits.shape , snake_case_ ) snake_case__ : List[str] = tf.constant([-11.1069, -9.7877, -8.3777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , snake_case_ , atol=1E-4 ) )
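The integration test above pins its expected logits to the first entry of TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST. A minimal interactive sketch of the same forward pass, assuming that entry is microsoft/resnet-50:

# Sketch: the checkpoint exercised by the integration test, run by hand.
import tensorflow as tf
from PIL import Image
from transformers import AutoImageProcessor, TFResNetForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits  # shape (1, 1000), as the test asserts
predicted_id = int(tf.math.argmax(logits, axis=-1)[0])
print(model.config.id2label[predicted_id])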
35
"""simple docstring""" from operator import delitem, getitem, setitem import pytest from data_structures.hashing.hash_map import HashMap def _lowerCamelCase( a ): return getitem, k def _lowerCamelCase( a , a ): return setitem, k, v def _lowerCamelCase( a ): return delitem, k def _lowerCamelCase( a , a , *a ): try: return fun(a , *a ), None except Exception as e: return None, e SCREAMING_SNAKE_CASE__:List[Any] = ( _set("""key_a""", """val_a"""), _set("""key_b""", """val_b"""), ) SCREAMING_SNAKE_CASE__:List[Any] = [ _set("""key_a""", """val_a"""), _set("""key_a""", """val_b"""), ] SCREAMING_SNAKE_CASE__:List[Any] = [ _set("""key_a""", """val_a"""), _set("""key_b""", """val_b"""), _del("""key_a"""), _del("""key_b"""), _set("""key_a""", """val_a"""), _del("""key_a"""), ] SCREAMING_SNAKE_CASE__:Any = [ _get("""key_a"""), _del("""key_a"""), _set("""key_a""", """val_a"""), _del("""key_a"""), _del("""key_a"""), _get("""key_a"""), ] SCREAMING_SNAKE_CASE__:int = [ *[_set(x, x) for x in range(5)], # guaranteed upsize ] SCREAMING_SNAKE_CASE__:Any = [ *[_set(x, x) for x in range(5)], # guaranteed upsize *[_del(x) for x in range(5)], _set("""key_a""", """val_b"""), ] @pytest.mark.parametrize( "operations" , ( pytest.param(_add_items , id="add items" ), pytest.param(_overwrite_items , id="overwrite items" ), pytest.param(_delete_items , id="delete items" ), pytest.param(_access_absent_items , id="access absent items" ), pytest.param(_add_with_resize_up , id="add with resize up" ), pytest.param(_add_with_resize_down , id="add with resize down" ), ) , ) def _lowerCamelCase( a ): __a = HashMap(initial_block_size=4 ) __a = {} for _, (fun, *args) in enumerate(a ): __a , __a = _run_operation(a , a , *a ) __a , __a = _run_operation(a , a , *a ) assert my_res == py_res assert str(a ) == str(a ) assert set(a ) == set(a ) assert len(a ) == len(a ) assert set(my.items() ) == set(py.items() ) def _lowerCamelCase( ): def is_public(a ) -> bool: return not name.startswith("_" ) __a = {name for name in dir({} ) if is_public(a )} __a = {name for name in dir(HashMap() ) if is_public(a )} assert dict_public_names > hash_public_names
261
0
import logging import os import sys import warnings from dataclasses import dataclass, field from random import randint from typing import Optional import datasets import evaluate import numpy as np from datasets import DatasetDict, load_dataset import transformers from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForAudioClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version _snake_case = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt") def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 16_000 ): '''simple docstring''' _lowerCAmelCase : List[str] = int(round(sample_rate * max_length ) ) if len(_lowerCamelCase ) <= sample_length: return wav _lowerCAmelCase : Tuple = randint(0 , len(_lowerCamelCase ) - sample_length - 1 ) return wav[random_offset : random_offset + sample_length] @dataclass class UpperCAmelCase_ : lowerCamelCase__ = field(default=a , metadata={'help': 'Name of a dataset from the datasets package'}) lowerCamelCase__ = field( default=a , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}) lowerCamelCase__ = field( default=a , metadata={'help': 'A file containing the training audio paths and labels.'}) lowerCamelCase__ = field( default=a , metadata={'help': 'A file containing the validation audio paths and labels.'}) lowerCamelCase__ = field( default='train' , metadata={ 'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\'' } , ) lowerCamelCase__ = field( default='validation' , metadata={ 'help': ( 'The name of the training data set split to use (via the datasets library). Defaults to \'validation\'' ) } , ) lowerCamelCase__ = field( default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , ) lowerCamelCase__ = field( default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''}) lowerCamelCase__ = field( default=a , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' ) } , ) lowerCamelCase__ = field( default=a , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' 
) } , ) lowerCamelCase__ = field( default=20 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , ) @dataclass class UpperCAmelCase_ : lowerCamelCase__ = field( default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , ) lowerCamelCase__ = field( default=a , metadata={'help': 'Pretrained config name or path if not the same as model_name'}) lowerCamelCase__ = field( default=a , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'}) lowerCamelCase__ = field( default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , ) lowerCamelCase__ = field( default=a , metadata={'help': 'Name or path of preprocessor config.'}) lowerCamelCase__ = field( default=a , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'}) lowerCamelCase__ = field( default=a , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'}) lowerCamelCase__ = field( default=a , metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' ) } , ) lowerCamelCase__ = field( default=a , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'}) lowerCamelCase__ = field( default=a , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , ) def snake_case__ ( self): '''simple docstring''' if not self.freeze_feature_extractor and self.freeze_feature_encoder: warnings.warn( "The argument `--freeze_feature_extractor` is deprecated and " "will be removed in a future version. Use `--freeze_feature_encoder`" "instead. Setting `freeze_feature_encoder==True`.", __a, ) if self.freeze_feature_extractor and not self.freeze_feature_encoder: raise ValueError( "The argument `--freeze_feature_extractor` is deprecated and " "should not be used in combination with `--freeze_feature_encoder`." "Only make use of `--freeze_feature_encoder`.") def A ( ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_audio_classification" , _lowerCamelCase , _lowerCamelCase ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() _lowerCAmelCase : Tuple = training_args.get_process_log_level() logger.setLevel(_lowerCamelCase ) transformers.utils.logging.set_verbosity(_lowerCamelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} " + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(F"Training/evaluation parameters {training_args}" ) # Set seed before initializing model. set_seed(training_args.seed ) # Detecting last checkpoint. _lowerCAmelCase : Dict = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _lowerCAmelCase : Optional[Any] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to train from scratch." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Initialize our dataset and prepare it for the audio classification task. _lowerCAmelCase : Tuple = DatasetDict() _lowerCAmelCase : str = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , ) _lowerCAmelCase : List[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , ) if data_args.audio_column_name not in raw_datasets["train"].column_names: raise ValueError( F"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. " "Make sure to set `--audio_column_name` to the correct audio column - one of " F"{', '.join(raw_datasets['train'].column_names )}." ) if data_args.label_column_name not in raw_datasets["train"].column_names: raise ValueError( F"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. " "Make sure to set `--label_column_name` to the correct text column - one of " F"{', '.join(raw_datasets['train'].column_names )}." ) # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over # transformer outputs in the classifier, but it doesn't always lead to better accuracy _lowerCAmelCase : Any = AutoFeatureExtractor.from_pretrained( model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # `datasets` takes care of automatically loading and resampling the audio, # so we just need to set the correct target sampling rate. 
_lowerCAmelCase : Any = raw_datasets.cast_column( data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) ) _lowerCAmelCase : Union[str, Any] = feature_extractor.model_input_names[0] def train_transforms(_lowerCamelCase ): _lowerCAmelCase : Dict = [] for audio in batch[data_args.audio_column_name]: _lowerCAmelCase : Any = random_subsample( audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate ) subsampled_wavs.append(_lowerCamelCase ) _lowerCAmelCase : List[str] = feature_extractor(_lowerCamelCase , sampling_rate=feature_extractor.sampling_rate ) _lowerCAmelCase : Optional[int] = {model_input_name: inputs.get(_lowerCamelCase )} _lowerCAmelCase : List[str] = list(batch[data_args.label_column_name] ) return output_batch def val_transforms(_lowerCamelCase ): _lowerCAmelCase : Union[str, Any] = [audio["array"] for audio in batch[data_args.audio_column_name]] _lowerCAmelCase : Tuple = feature_extractor(_lowerCamelCase , sampling_rate=feature_extractor.sampling_rate ) _lowerCAmelCase : List[str] = {model_input_name: inputs.get(_lowerCamelCase )} _lowerCAmelCase : Union[str, Any] = list(batch[data_args.label_column_name] ) return output_batch # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. _lowerCAmelCase : int = raw_datasets["train"].features[data_args.label_column_name].names _lowerCAmelCase , _lowerCAmelCase : str = {}, {} for i, label in enumerate(_lowerCamelCase ): _lowerCAmelCase : List[str] = str(_lowerCamelCase ) _lowerCAmelCase : Tuple = label # Load the accuracy metric from the datasets package _lowerCAmelCase : Dict = evaluate.load("accuracy" ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with # `predictions` and `label_ids` fields) and has to return a dictionary string to float. 
def compute_metrics(_lowerCamelCase ): _lowerCAmelCase : int = np.argmax(eval_pred.predictions , axis=1 ) return metric.compute(predictions=_lowerCamelCase , references=eval_pred.label_ids ) _lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(_lowerCamelCase ) , labelaid=_lowerCamelCase , idalabel=_lowerCamelCase , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) _lowerCAmelCase : Optional[int] = AutoModelForAudioClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # freeze the convolutional waveform encoder if model_args.freeze_feature_encoder: model.freeze_feature_encoder() if training_args.do_train: if data_args.max_train_samples is not None: _lowerCAmelCase : Union[str, Any] = ( raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms raw_datasets["train"].set_transform(_lowerCamelCase , output_all_columns=_lowerCamelCase ) if training_args.do_eval: if data_args.max_eval_samples is not None: _lowerCAmelCase : int = ( raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms raw_datasets["eval"].set_transform(_lowerCamelCase , output_all_columns=_lowerCamelCase ) # Initialize our trainer _lowerCAmelCase : Optional[Any] = Trainer( model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=_lowerCamelCase , tokenizer=_lowerCamelCase , ) # Training if training_args.do_train: _lowerCAmelCase : Any = None if training_args.resume_from_checkpoint is not None: _lowerCAmelCase : Optional[int] = training_args.resume_from_checkpoint elif last_checkpoint is not None: _lowerCAmelCase : Union[str, Any] = last_checkpoint _lowerCAmelCase : Optional[Any] = trainer.train(resume_from_checkpoint=_lowerCamelCase ) trainer.save_model() trainer.log_metrics("train" , train_result.metrics ) trainer.save_metrics("train" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _lowerCAmelCase : Dict = trainer.evaluate() trainer.log_metrics("eval" , _lowerCamelCase ) trainer.save_metrics("eval" , _lowerCamelCase ) # Write model card and (optionally) push to hub _lowerCAmelCase : int = { "finetuned_from": model_args.model_name_or_path, "tasks": "audio-classification", "dataset": data_args.dataset_name, "tags": ["audio-classification"], } if training_args.push_to_hub: trainer.push_to_hub(**_lowerCamelCase ) else: trainer.create_model_card(**_lowerCamelCase ) if __name__ == "__main__": main()
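The helper at the top of this script (obfuscated as `A`) crops each waveform to a fixed number of samples at a random offset during training. A self-contained sketch of the same logic, with an illustrative call:

# Sketch of the random-subsample helper: crop a waveform to `max_length`
# seconds at a random offset (mirrors the obfuscated helper `A` above).
from random import randint

import numpy as np


def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16_000) -> np.ndarray:
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]


wav = np.random.randn(5 * 16_000)           # 5 s of fake audio
crop = random_subsample(wav, max_length=2)  # random 2 s window
assert len(crop) == 2 * 16_000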
36
"""simple docstring""" import copy import re class snake_case__ : _snake_case : Dict = """hp""" _snake_case : List[str] = {} _snake_case : int = None @classmethod def a__ ( cls , lowerCamelCase , lowerCamelCase ): __a = prefix __a = defaults cls.build_naming_info() @staticmethod def a__ ( lowerCamelCase , lowerCamelCase ): if len(lowerCamelCase ) == 0: return "" __a = None if any(char.isdigit() for char in word ): raise Exception(F"Parameters should not contain numbers: '{word}' contains a number" ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 , len(lowerCamelCase ) + 1 ): __a = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: __a = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(lowerCamelCase ): __a = "" while integer != 0: __a = chr(ord("A" ) + integer % 10 ) + s integer //= 10 return s __a = 0 while True: __a = word + "#" + int_to_alphabetic(lowerCamelCase ) if sword in info["reverse_short_word"]: continue else: __a = sword break __a = short_word __a = word return short_word @staticmethod def a__ ( lowerCamelCase , lowerCamelCase ): __a = param_name.split("_" ) __a = [TrialShortNamer.shortname_for_word(lowerCamelCase , lowerCamelCase ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name __a = ["", "_"] for separator in separators: __a = separator.join(lowerCamelCase ) if shortname not in info["reverse_short_param"]: __a = shortname __a = param_name return shortname return param_name @staticmethod def a__ ( lowerCamelCase , lowerCamelCase ): __a = TrialShortNamer.shortname_for_key(lowerCamelCase , lowerCamelCase ) __a = short_name __a = param_name @classmethod def a__ ( cls ): if cls.NAMING_INFO is not None: return __a = { "short_word": {}, "reverse_short_word": {}, "short_param": {}, "reverse_short_param": {}, } __a = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(lowerCamelCase , lowerCamelCase ) __a = info @classmethod def a__ ( cls , lowerCamelCase ): cls.build_naming_info() assert cls.PREFIX is not None __a = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(F"You should provide a default value for the param name {k} with value {v}" ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue __a = cls.NAMING_INFO["short_param"][k] if isinstance(lowerCamelCase , lowerCamelCase ): __a = 1 if v else 0 __a = "" if isinstance(lowerCamelCase , (int, float) ) else "-" __a = F"{key}{sep}{v}" name.append(lowerCamelCase ) return "_".join(lowerCamelCase ) @classmethod def a__ ( cls , lowerCamelCase ): __a = repr[len(cls.PREFIX ) + 1 :] if repr == "": __a = [] else: __a = repr.split("_" ) __a = {} for value in values: if "-" in value: __a , __a = value.split("-" ) else: __a = re.sub("[0-9.]" , "" , lowerCamelCase ) __a = float(re.sub("[^0-9.]" , "" , lowerCamelCase ) ) __a = cls.NAMING_INFO["reverse_short_param"][p_k] __a = p_v for k in cls.DEFAULTS: if k not in parameters: __a = cls.DEFAULTS[k] return parameters
261
0
'''simple docstring''' import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , ) @pytest.mark.usefixtures('''sm_env''' ) @parameterized_class( [ { '''framework''': '''pytorch''', '''script''': '''run_glue.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.p3.16xlarge''', '''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6}, }, { '''framework''': '''pytorch''', '''script''': '''run_ddp.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.p3.16xlarge''', '''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6}, }, { '''framework''': '''tensorflow''', '''script''': '''run_tf_dist.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.p3.16xlarge''', '''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7}, }, ] ) class lowerCAmelCase_( unittest.TestCase ): '''simple docstring''' def UpperCAmelCase_ ( self ) -> Optional[Any]: if self.framework == "pytorch": subprocess.run( F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() ,encoding="""utf-8""" ,check=__UpperCAmelCase ,) assert hasattr(self ,"""env""" ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[Any]: lowerCAmelCase__ : Optional[int] = F"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}""" # distributed data settings lowerCAmelCase__ : Any = {"""smdistributed""": {"""dataparallel""": {"""enabled""": True}}} if self.script != """run_ddp.py""" else None # creates estimator return HuggingFace( entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=__UpperCAmelCase ,instance_count=__UpperCAmelCase ,instance_type=self.instance_type ,debugger_hook_config=__UpperCAmelCase ,hyperparameters={**self.env.distributed_hyperparameters, """model_name_or_path""": self.model_name_or_path} ,metric_definitions=self.env.metric_definitions ,distribution=__UpperCAmelCase ,py_version="""py36""" ,) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[Any]: TrainingJobAnalytics(__UpperCAmelCase ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(2,)] ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Any: # create estimator lowerCAmelCase__ : List[Any] = self.create_estimator(__UpperCAmelCase ) # run training estimator.fit() # result dataframe lowerCAmelCase__ : Any = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis lowerCAmelCase__ : int = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) lowerCAmelCase__ : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping lowerCAmelCase__ : List[str] = ( Session().describe_training_job(estimator.latest_training_job.name 
).get("""TrainingTimeInSeconds""" ,99_9999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(F"""{estimator.latest_training_job.name}.json""" ,"""w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} ,__UpperCAmelCase )
37
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING SCREAMING_SNAKE_CASE__:int = logging.get_logger(__name__) class snake_case__ ( snake_case_ ): _snake_case : Optional[int] = """upernet""" def __init__( self , lowerCamelCase=None , lowerCamelCase=512 , lowerCamelCase=0.02 , lowerCamelCase=[1, 2, 3, 6] , lowerCamelCase=True , lowerCamelCase=0.4 , lowerCamelCase=384 , lowerCamelCase=256 , lowerCamelCase=1 , lowerCamelCase=False , lowerCamelCase=255 , **lowerCamelCase , ): super().__init__(**lowerCamelCase ) if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." ) __a = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] ) elif isinstance(lowerCamelCase , lowerCamelCase ): __a = backbone_config.get("model_type" ) __a = CONFIG_MAPPING[backbone_model_type] __a = config_class.from_dict(lowerCamelCase ) __a = backbone_config __a = hidden_size __a = initializer_range __a = pool_scales __a = use_auxiliary_head __a = auxiliary_loss_weight __a = auxiliary_in_channels __a = auxiliary_channels __a = auxiliary_num_convs __a = auxiliary_concat_input __a = loss_ignore_index def a__ ( self ): __a = copy.deepcopy(self.__dict__ ) __a = self.backbone_config.to_dict() __a = self.__class__.model_type return output
261
0
from argparse import ArgumentParser from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand UpperCAmelCase_ : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name def SCREAMING_SNAKE_CASE_ ( __magic_name__ : str ) -> List[Any]: """simple docstring""" if not path: return "pipe" for ext in PipelineDataFormat.SUPPORTED_FORMATS: if path.endswith(__magic_name__ ): return ext raise Exception( f"""Unable to determine file format from file extension {path}. """ f"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" ) def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Any ) -> Optional[int]: """simple docstring""" UpperCamelCase :List[str] = pipeline( task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , ) UpperCamelCase :Optional[int] = try_infer_format_from_ext(args.input ) if args.format == """infer""" else args.format UpperCamelCase :Dict = PipelineDataFormat.from_str( format=__magic_name__ , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , ) return RunCommand(__magic_name__ , __magic_name__ ) class _SCREAMING_SNAKE_CASE ( _a ): def __init__( self : List[Any] , __lowerCamelCase : Pipeline , __lowerCamelCase : PipelineDataFormat ): UpperCamelCase :str = nlp UpperCamelCase :Union[str, Any] = reader @staticmethod def _A ( __lowerCamelCase : ArgumentParser ): UpperCamelCase :Union[str, Any] = parser.add_parser("""run""" , help="""Run a pipeline through the CLI""" ) run_parser.add_argument("""--task""" , choices=get_supported_tasks() , help="""Task to run""" ) run_parser.add_argument("""--input""" , type=__lowerCamelCase , help="""Path to the file to use for inference""" ) run_parser.add_argument("""--output""" , type=__lowerCamelCase , help="""Path to the file that will be used post to write results.""" ) run_parser.add_argument("""--model""" , type=__lowerCamelCase , help="""Name or path to the model to instantiate.""" ) run_parser.add_argument("""--config""" , type=__lowerCamelCase , help="""Name or path to the model's config to instantiate.""" ) run_parser.add_argument( """--tokenizer""" , type=__lowerCamelCase , help="""Name of the tokenizer to use. (default: same as the model name)""" ) run_parser.add_argument( """--column""" , type=__lowerCamelCase , help="""Name of the column to use as input. 
(For multi columns input as QA use column1,columns2)""" , ) run_parser.add_argument( """--format""" , type=__lowerCamelCase , default="""infer""" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="""Input format to read from""" , ) run_parser.add_argument( """--device""" , type=__lowerCamelCase , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , ) run_parser.add_argument("""--overwrite""" , action="""store_true""" , help="""Allow overwriting the output file.""" ) run_parser.set_defaults(func=__lowerCamelCase ) def _A ( self : Optional[int] ): UpperCamelCase , UpperCamelCase :Union[str, Any] = self._nlp, [] for entry in self._reader: UpperCamelCase :Any = nlp(**__lowerCamelCase ) if self._reader.is_multi_columns else nlp(__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ): outputs.append(__lowerCamelCase ) else: outputs += output # Saving data if self._nlp.binary_output: UpperCamelCase :List[Any] = self._reader.save_binary(__lowerCamelCase ) logger.warning(F"""Current pipeline requires output to be in binary format, saving at {binary_path}""" ) else: self._reader.save(__lowerCamelCase )
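This CLI command is a thin wrapper over `transformers.pipeline`: it builds the pipeline, streams records through the reader, and saves the outputs. The inference it dispatches can be reproduced directly:

# Sketch: the pipeline call the `run` command wraps, used from Python.
from transformers import pipeline

nlp = pipeline(task="sentiment-analysis")  # same task resolution as --task
print(nlp("Technical editing is surprisingly satisfying."))
# [{'label': 'POSITIVE', 'score': ...}]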
38
"""simple docstring""" def _lowerCamelCase( a = 1_0_0_0 ): __a = 3 __a = 0 while a < n: if a % 3 == 0 or a % 5 == 0: result += a elif a % 1_5 == 0: result -= a a += 1 return result if __name__ == "__main__": print(F'''{solution() = }''')
261
0
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _a = logging.get_logger(__name__) _a = { '''microsoft/unispeech-large-1500h-cv''': ( '''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json''' ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class __lowerCamelCase ( snake_case__): """simple docstring""" UpperCamelCase__ = "unispeech" def __init__( self , UpperCAmelCase=32 , UpperCAmelCase=768 , UpperCAmelCase=12 , UpperCAmelCase=12 , UpperCAmelCase=3072 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-5 , UpperCAmelCase="group" , UpperCAmelCase="gelu" , UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) , UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , UpperCAmelCase=False , UpperCAmelCase=128 , UpperCAmelCase=16 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=0.05 , UpperCAmelCase=10 , UpperCAmelCase=2 , UpperCAmelCase=0.0 , UpperCAmelCase=10 , UpperCAmelCase=0 , UpperCAmelCase=320 , UpperCAmelCase=2 , UpperCAmelCase=0.1 , UpperCAmelCase=100 , UpperCAmelCase=256 , UpperCAmelCase=256 , UpperCAmelCase=0.1 , UpperCAmelCase="mean" , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=256 , UpperCAmelCase=80 , UpperCAmelCase=0 , UpperCAmelCase=1 , UpperCAmelCase=2 , UpperCAmelCase=0.5 , **UpperCAmelCase , ): """simple docstring""" super().__init__(**UpperCAmelCase , pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase ) _UpperCAmelCase = hidden_size _UpperCAmelCase = feat_extract_norm _UpperCAmelCase = feat_extract_activation _UpperCAmelCase = list(UpperCAmelCase ) _UpperCAmelCase = list(UpperCAmelCase ) _UpperCAmelCase = list(UpperCAmelCase ) _UpperCAmelCase = conv_bias _UpperCAmelCase = num_conv_pos_embeddings _UpperCAmelCase = num_conv_pos_embedding_groups _UpperCAmelCase = len(self.conv_dim ) _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_dropout _UpperCAmelCase = attention_dropout _UpperCAmelCase = activation_dropout _UpperCAmelCase = feat_proj_dropout _UpperCAmelCase = final_dropout _UpperCAmelCase = layerdrop _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = initializer_range _UpperCAmelCase = num_ctc_classes _UpperCAmelCase = vocab_size _UpperCAmelCase = do_stable_layer_norm _UpperCAmelCase = use_weighted_layer_sum _UpperCAmelCase = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==' ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =' F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,""" F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _UpperCAmelCase = apply_spec_augment _UpperCAmelCase = mask_time_prob _UpperCAmelCase = mask_time_length _UpperCAmelCase = mask_time_min_masks _UpperCAmelCase = mask_feature_prob _UpperCAmelCase = mask_feature_length _UpperCAmelCase = mask_feature_min_masks # parameters for pretraining with codevector quantized representations _UpperCAmelCase = num_codevectors_per_group _UpperCAmelCase = num_codevector_groups _UpperCAmelCase = contrastive_logits_temperature _UpperCAmelCase = feat_quantizer_dropout _UpperCAmelCase = num_negatives _UpperCAmelCase = codevector_dim _UpperCAmelCase = proj_codevector_dim _UpperCAmelCase = diversity_loss_weight # ctc loss _UpperCAmelCase = ctc_loss_reduction _UpperCAmelCase = ctc_zero_infinity # pretraining loss _UpperCAmelCase = replace_prob @property def UpperCamelCase ( self ): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
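The final property in this config (a functools.reduce over conv_stride) is the downsampling ratio between raw input samples and output frames. Assuming it corresponds to the public inputs_to_logits_ratio property, the default strides give 320:

# Sketch: the stride-product property on the public UniSpeechConfig
# (property name assumed to match the Wav2Vec2-style configs).
from transformers import UniSpeechConfig

config = UniSpeechConfig()  # conv_stride defaults to (5, 2, 2, 2, 2, 2, 2)
assert config.inputs_to_logits_ratio == 320  # 5 * 2**6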
39
"""simple docstring""" import operator def _lowerCamelCase( a , a = False , a = None ): __a = operator.lt if reverse else operator.gt __a = solution or [] if not arr: return solution __a = [arr.pop(0 )] for i, item in enumerate(a ): if _operator(a , sublist[-1] ): sublist.append(a ) arr.pop(a ) # merging sublist into solution list if not solution: solution.extend(a ) else: while sublist: __a = sublist.pop(0 ) for i, xx in enumerate(a ): if not _operator(a , a ): solution.insert(a , a ) break else: solution.append(a ) strand_sort(a , a , a ) return solution if __name__ == "__main__": assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5] assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
261
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __lowercase = logging.get_logger(__name__) __lowercase = { """unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""", } class _A ( _a ): """simple docstring""" UpperCAmelCase : Optional[Any] = """lxmert""" UpperCAmelCase : str = {} def __init__( self : int , __UpperCAmelCase : List[str]=30522 , __UpperCAmelCase : Optional[Any]=768 , __UpperCAmelCase : str=12 , __UpperCAmelCase : List[Any]=9500 , __UpperCAmelCase : Dict=1600 , __UpperCAmelCase : Tuple=400 , __UpperCAmelCase : List[str]=3072 , __UpperCAmelCase : Tuple="gelu" , __UpperCAmelCase : str=0.1 , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : Union[str, Any]=512 , __UpperCAmelCase : Dict=2 , __UpperCAmelCase : List[str]=0.02 , __UpperCAmelCase : List[str]=1e-12 , __UpperCAmelCase : Optional[int]=9 , __UpperCAmelCase : List[str]=5 , __UpperCAmelCase : Dict=5 , __UpperCAmelCase : List[str]=2048 , __UpperCAmelCase : List[Any]=4 , __UpperCAmelCase : Tuple=6.67 , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : int=True , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : Optional[Any]=True , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : Optional[Any]=True , __UpperCAmelCase : Optional[int]=True , **__UpperCAmelCase : str , ): a : Tuple = vocab_size a : Optional[Any] = hidden_size a : List[str] = num_attention_heads a : int = hidden_act a : Dict = intermediate_size a : Optional[Any] = hidden_dropout_prob a : Tuple = attention_probs_dropout_prob a : str = max_position_embeddings a : Optional[int] = type_vocab_size a : str = initializer_range a : Tuple = layer_norm_eps a : Any = num_qa_labels a : str = num_object_labels a : Any = num_attr_labels a : Union[str, Any] = l_layers a : int = x_layers a : str = r_layers a : Dict = visual_feat_dim a : int = visual_pos_dim a : Optional[int] = visual_loss_normalizer a : Any = task_matched a : Optional[int] = task_mask_lm a : Union[str, Any] = task_obj_predict a : List[str] = task_qa a : Any = visual_obj_loss a : Union[str, Any] = visual_attr_loss a : int = visual_feat_loss a : int = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers} super().__init__(**__UpperCAmelCase)
40
"""simple docstring""" import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class snake_case__ : def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=50 , lowerCamelCase=0.02 , lowerCamelCase=True , lowerCamelCase=None , ): __a = parent __a = batch_size __a = seq_length __a = is_training __a = use_input_mask __a = vocab_size __a = hidden_size __a = num_hidden_layers __a = num_attention_heads __a = intermediate_size __a = hidden_act __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = max_position_embeddings __a = initializer_range __a = use_labels __a = scope def a__ ( self ): __a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a = None if self.use_input_mask: __a = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: __a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a = self.get_config() return config, input_ids, input_mask, token_labels def a__ ( self ): return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , ) def a__ ( self ): ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) = self.prepare_config_and_inputs() __a = True __a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ): __a = BertGenerationEncoder(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() __a = model(lowerCamelCase , attention_mask=lowerCamelCase ) __a = model(lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ): __a = True __a = BertGenerationEncoder(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() __a = model( lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , ) __a = model( lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a__ ( self , 
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ): __a = True __a = True __a = BertGenerationDecoder(config=lowerCamelCase ).to(lowerCamelCase ).eval() # first forward pass __a = model( lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , use_cache=lowerCamelCase , ) __a = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __a = ids_tensor((self.batch_size, 3) , config.vocab_size ) __a = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __a = torch.cat([input_ids, next_tokens] , dim=-1 ) __a = torch.cat([input_mask, next_mask] , dim=-1 ) __a = model( lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , output_hidden_states=lowerCamelCase , )["hidden_states"][0] __a = model( lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , output_hidden_states=lowerCamelCase , )["hidden_states"][0] # select random slice __a = ids_tensor((1,) , output_from_past.shape[-1] ).item() __a = output_from_no_past[:, -3:, random_slice_idx].detach() __a = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) ) def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , *lowerCamelCase , ): __a = BertGenerationDecoder(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() __a = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a__ ( self ): __a , __a , __a , __a = self.prepare_config_and_inputs() __a = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ): _snake_case : Union[str, Any] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () _snake_case : Any = (BertGenerationDecoder,) if is_torch_available() else () _snake_case : Union[str, Any] = ( {"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder} if is_torch_available() else {} ) def a__ ( self ): __a = BertGenerationEncoderTester(self ) __a = ConfigTester(self , config_class=lowerCamelCase , hidden_size=37 ) def a__ ( self ): self.config_tester.run_common_tests() def a__ ( self ): __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase ) def a__ ( self ): __a , __a , __a , __a = self.model_tester.prepare_config_and_inputs() __a = "bert" self.model_tester.create_and_check_model(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) def a__ ( self ): __a = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase ) def a__ ( self ): __a = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCamelCase ) def a__ ( self ): # This regression test was failing with PyTorch < 1.3 ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( 
__a ) , ( __a ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() __a = None self.model_tester.create_and_check_model_as_decoder( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) def a__ ( self ): __a = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase ) @slow def a__ ( self ): __a = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) self.assertIsNotNone(lowerCamelCase ) @require_torch class snake_case__ ( unittest.TestCase ): @slow def a__ ( self ): __a = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) __a = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] ) with torch.no_grad(): __a = model(lowerCamelCase )[0] __a = torch.Size([1, 8, 1024] ) self.assertEqual(output.shape , lowerCamelCase ) __a = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) ) @require_torch class snake_case__ ( unittest.TestCase ): @slow def a__ ( self ): __a = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) __a = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] ) with torch.no_grad(): __a = model(lowerCamelCase )[0] __a = torch.Size([1, 8, 50358] ) self.assertEqual(output.shape , lowerCamelCase ) __a = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) )
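Both integration tests above load the same public checkpoint; a minimal sketch of the encoder forward pass they verify (the tokenizer class is assumed to be BertGenerationTokenizer, as in the transformers documentation for this checkpoint):

# Sketch: the checkpoint pinned by the integration tests, run by hand.
import torch
from transformers import BertGenerationEncoder, BertGenerationTokenizer

ckpt = "google/bert_for_seq_generation_L-24_bbc_encoder"
tokenizer = BertGenerationTokenizer.from_pretrained(ckpt)
model = BertGenerationEncoder.from_pretrained(ckpt)

inputs = tokenizer("hello, my dog is cute", return_tensors="pt")
with torch.no_grad():
    hidden = model(**inputs).last_hidden_state
print(hidden.shape)  # torch.Size([1, seq_len, 1024]), matching the test's expected shape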
261
0