code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
"""simple docstring""" import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SegformerConfig, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _lowercase = logging.get_logger(__name__) def _snake_case ( snake_case__ : str , snake_case__ : List[Any]=False ): A = OrderedDict() for key, value in state_dict.items(): if encoder_only and not key.startswith('head' ): A = 'segformer.encoder.' + key if key.startswith('backbone' ): A = key.replace('backbone' , 'segformer.encoder' ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 A = key[key.find('patch_embed' ) + len('patch_embed' )] A = key.replace(F'patch_embed{idx}' , F'patch_embeddings.{int(snake_case__ )-1}' ) if "norm" in key: A = key.replace('norm' , 'layer_norm' ) if "segformer.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 A = key[key.find('segformer.encoder.layer_norm' ) + len('segformer.encoder.layer_norm' )] A = key.replace(F'layer_norm{idx}' , F'layer_norm.{int(snake_case__ )-1}' ) if "layer_norm1" in key: A = key.replace('layer_norm1' , 'layer_norm_1' ) if "layer_norm2" in key: A = key.replace('layer_norm2' , 'layer_norm_2' ) if "block" in key: # replace for example block1 by block.0 A = key[key.find('block' ) + len('block' )] A = key.replace(F'block{idx}' , F'block.{int(snake_case__ )-1}' ) if "attn.q" in key: A = key.replace('attn.q' , 'attention.self.query' ) if "attn.proj" in key: A = key.replace('attn.proj' , 'attention.output.dense' ) if "attn" in key: A = key.replace('attn' , 'attention.self' ) if "fc1" in key: A = key.replace('fc1' , 'dense1' ) if "fc2" in key: A = key.replace('fc2' , 'dense2' ) if "linear_pred" in key: A = key.replace('linear_pred' , 'classifier' ) if "linear_fuse" in key: A = key.replace('linear_fuse.conv' , 'linear_fuse' ) A = key.replace('linear_fuse.bn' , 'batch_norm' ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 A = key[key.find('linear_c' ) + len('linear_c' )] A = key.replace(F'linear_c{idx}' , F'linear_c.{int(snake_case__ )-1}' ) if key.startswith('head' ): A = key.replace('head' , 'classifier' ) A = value return new_state_dict def _snake_case ( snake_case__ : List[Any] , snake_case__ : Union[str, Any] ): # for each of the encoder blocks: for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) A = state_dict.pop(F'segformer.encoder.block.{i}.{j}.attention.self.kv.weight' ) A = state_dict.pop(F'segformer.encoder.block.{i}.{j}.attention.self.kv.bias' ) # next, add keys and values (in that order) to the state dict A = kv_weight[ : config.hidden_sizes[i], : ] A = kv_bias[: config.hidden_sizes[i]] A = kv_weight[ config.hidden_sizes[i] :, : ] A = kv_bias[ config.hidden_sizes[i] : ] def _snake_case ( ): A = 'http://images.cocodataset.org/val2017/000000039769.jpg' A = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) return image @torch.no_grad() def _snake_case ( snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : Tuple ): A = SegformerConfig() A = False # set attributes based on model_name A = 'huggingface/label-files' if "segformer" in model_name: A = model_name[len('segformer.' ) : len('segformer.' ) + 2] if "ade" in model_name: A = 150 A = 'ade20k-id2label.json' A = (1, 150, 128, 128) elif "city" in model_name: A = 19 A = 'cityscapes-id2label.json' A = (1, 19, 128, 128) else: raise ValueError(F'Model {model_name} not supported' ) elif "mit" in model_name: A = True A = model_name[4:6] A = 1000 A = 'imagenet-1k-id2label.json' A = (1, 1000) else: raise ValueError(F'Model {model_name} not supported' ) # set config attributes A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) ) A = {int(snake_case__ ): v for k, v in idalabel.items()} A = idalabel A = {v: k for k, v in idalabel.items()} if size == "b0": pass elif size == "b1": A = [64, 128, 320, 512] A = 256 elif size == "b2": A = [64, 128, 320, 512] A = 768 A = [3, 4, 6, 3] elif size == "b3": A = [64, 128, 320, 512] A = 768 A = [3, 4, 18, 3] elif size == "b4": A = [64, 128, 320, 512] A = 768 A = [3, 8, 27, 3] elif size == "b5": A = [64, 128, 320, 512] A = 768 A = [3, 6, 40, 3] else: raise ValueError(F'Size {size} not supported' ) # load image processor (only resize + normalize) A = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=snake_case__ , align=snake_case__ , do_random_crop=snake_case__ ) # prepare image A = prepare_img() A = image_processor(images=snake_case__ , return_tensors='pt' ).pixel_values logger.info(F'Converting model {model_name}...' ) # load original state dict if encoder_only: A = torch.load(snake_case__ , map_location=torch.device('cpu' ) ) else: A = torch.load(snake_case__ , map_location=torch.device('cpu' ) )['state_dict'] # rename keys A = rename_keys(snake_case__ , encoder_only=snake_case__ ) if not encoder_only: del state_dict["decode_head.conv_seg.weight"] del state_dict["decode_head.conv_seg.bias"] # key and value matrices need special treatment read_in_k_v(snake_case__ , snake_case__ ) # create HuggingFace model and load state dict if encoder_only: A = False A = SegformerForImageClassification(snake_case__ ) else: A = SegformerForSemanticSegmentation(snake_case__ ) model.load_state_dict(snake_case__ ) model.eval() # forward pass A = model(snake_case__ ) A = outputs.logits # set expected_slice based on model name # ADE20k checkpoints if model_name == "segformer.b0.512x512.ade.160k": A = torch.tensor( [ [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]], [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]], [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]], ] ) elif model_name == "segformer.b1.512x512.ade.160k": A = torch.tensor( [ [[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]], [[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]], [[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]], ] ) elif model_name == "segformer.b2.512x512.ade.160k": A = torch.tensor( [ [[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]], [[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]], [[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]], ] ) elif model_name == "segformer.b3.512x512.ade.160k": A = torch.tensor( [ [[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]], [[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]], [[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]], ] ) elif model_name == "segformer.b4.512x512.ade.160k": A = torch.tensor( [ [[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]], [[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]], [[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]], ] ) elif model_name == "segformer.b5.640x640.ade.160k": A = torch.tensor( [ [[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]], [[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]], [[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]], ] ) # Cityscapes checkpoints elif model_name == "segformer.b0.1024x1024.city.160k": A = torch.tensor( [ [[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]], [[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]], [[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]], ] ) elif model_name == "segformer.b0.512x1024.city.160k": A = torch.tensor( [ [[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]], [[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]], [[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]], ] ) elif model_name == "segformer.b0.640x1280.city.160k": A = torch.tensor( [ [ [-1.13_72e01, -1.27_87e01, -1.34_77e01], [-1.25_36e01, -1.41_94e01, -1.44_09e01], [-1.32_17e01, -1.48_88e01, -1.53_27e01], ], [ [-1.47_91e01, -1.71_22e01, -1.82_77e01], [-1.71_63e01, -1.91_92e01, -1.95_33e01], [-1.78_97e01, -1.99_91e01, -2.03_15e01], ], [ [7.67_23e-01, 4.19_21e-01, -7.78_78e-02], [4.77_72e-01, 9.55_57e-03, -2.80_82e-01], [3.60_32e-01, -2.48_26e-01, -5.11_68e-01], ], ] ) elif model_name == "segformer.b0.768x768.city.160k": A = torch.tensor( [ [[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]], [[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]], [[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]], ] ) elif model_name == "segformer.b1.1024x1024.city.160k": A = torch.tensor( [ [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]], [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]], [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]], ] ) elif model_name == "segformer.b2.1024x1024.city.160k": A = torch.tensor( [ [[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]], [[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]], [[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]], ] ) elif model_name == "segformer.b3.1024x1024.city.160k": A = torch.tensor( [ [[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]], [[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]], [[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]], ] ) elif model_name == "segformer.b4.1024x1024.city.160k": A = torch.tensor( [ [[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]], [[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]], [[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]], ] ) elif model_name == "segformer.b5.1024x1024.city.160k": A = torch.tensor( [ [[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]], [[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]], [[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]], ] ) else: A = logits.argmax(-1 ).item() print('Predicted class:' , model.config.idalabel[predicted_class_idx] ) # verify logits if not encoder_only: assert logits.shape == expected_shape assert torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1e-2 ) # finally, save model and image processor logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' ) Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) model.save_pretrained(snake_case__ ) image_processor.save_pretrained(snake_case__ ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''segformer.b0.512x512.ade.160k''', type=str, help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) _lowercase = parser.parse_args() convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
91
"""simple docstring""" import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechTaHifiGan, SpeechTaHifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: str = AudioLDMPipeline _lowerCamelCase: Optional[int] = TEXT_TO_AUDIO_PARAMS _lowerCamelCase: Optional[int] = TEXT_TO_AUDIO_BATCH_PARAMS _lowerCamelCase: Optional[int] = frozenset( [ '''num_inference_steps''', '''num_waveforms_per_prompt''', '''generator''', '''latents''', '''output_type''', '''return_dict''', '''callback''', '''callback_steps''', ] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: torch.manual_seed(0 ) A = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=(32, 64) ,class_embed_type='simple_projection' ,projection_class_embeddings_input_dim=32 ,class_embeddings_concat=A_ ,) A = DDIMScheduler( beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule='scaled_linear' ,clip_sample=A_ ,set_alpha_to_one=A_ ,) torch.manual_seed(0 ) A = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=1 ,out_channels=1 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,) torch.manual_seed(0 ) A = ClapTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,projection_dim=32 ,) A = ClapTextModelWithProjection(A_ ) A = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' ,model_max_length=77 ) A = SpeechTaHifiGanConfig( model_in_dim=8 ,sampling_rate=1_6000 ,upsample_initial_channel=16 ,upsample_rates=[2, 2] ,upsample_kernel_sizes=[4, 4] ,resblock_kernel_sizes=[3, 7] ,resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] ,normalize_before=A_ ,) A = SpeechTaHifiGan(A_ ) A = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'vocoder': vocoder, } return components def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Any ,A_ : Dict=0 ) -> str: if str(A_ ).startswith('mps' ): A = torch.manual_seed(A_ ) else: A = torch.Generator(device=A_ ).manual_seed(A_ ) A = { 'prompt': 'A hammer hitting a wooden surface', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, } return inputs def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = audioldm_pipe(**A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) == 256 A = audio[:10] A = np.array( [-0.00_50, 0.00_50, -0.00_60, 0.00_33, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_33] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = 3 * [inputs['prompt']] # forward A = audioldm_pipe(**A_ ) A = output.audios[0] A = self.get_dummy_inputs(A_ ) A = 3 * [inputs.pop('prompt' )] A = audioldm_pipe.tokenizer( A_ ,padding='max_length' ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=A_ ,return_tensors='pt' ,) A = text_inputs['input_ids'].to(A_ ) A = audioldm_pipe.text_encoder( A_ ,) A = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state A = F.normalize(A_ ,dim=-1 ) A = prompt_embeds # forward A = audioldm_pipe(**A_ ) A = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any: A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = 3 * ['this is a negative prompt'] A = negative_prompt A = 3 * [inputs['prompt']] # forward A = audioldm_pipe(**A_ ) A = output.audios[0] A = self.get_dummy_inputs(A_ ) A = 3 * [inputs.pop('prompt' )] A = [] for p in [prompt, negative_prompt]: A = audioldm_pipe.tokenizer( A_ ,padding='max_length' ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=A_ ,return_tensors='pt' ,) A = text_inputs['input_ids'].to(A_ ) A = audioldm_pipe.text_encoder( A_ ,) A = text_embeds.text_embeds # additional L_2 normalization over each hidden-state A = F.normalize(A_ ,dim=-1 ) embeds.append(A_ ) A , A = embeds # forward A = audioldm_pipe(**A_ ) A = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : str ) -> int: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = PNDMScheduler(skip_prk_steps=A_ ) A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = 'egg cracking' A = audioldm_pipe(**A_ ,negative_prompt=A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) == 256 A = audio[:10] A = np.array( [-0.00_51, 0.00_50, -0.00_60, 0.00_34, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_32] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = PNDMScheduler(skip_prk_steps=A_ ) A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = 'A hammer hitting a wooden surface' # test num_waveforms_per_prompt=1 (default) A = audioldm_pipe(A_ ,num_inference_steps=2 ).audios assert audios.shape == (1, 256) # test num_waveforms_per_prompt=1 (default) for batch of prompts A = 2 A = audioldm_pipe([prompt] * batch_size ,num_inference_steps=2 ).audios assert audios.shape == (batch_size, 256) # test num_waveforms_per_prompt for single prompt A = 2 A = audioldm_pipe(A_ ,num_inference_steps=2 ,num_waveforms_per_prompt=A_ ).audios assert audios.shape == (num_waveforms_per_prompt, 256) # test num_waveforms_per_prompt for batch of prompts A = 2 A = audioldm_pipe( [prompt] * batch_size ,num_inference_steps=2 ,num_waveforms_per_prompt=A_ ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = audioldm_pipe.vocoder.config.sampling_rate A = self.get_dummy_inputs(A_ ) A = audioldm_pipe(audio_length_in_s=0.0_16 ,**A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) / vocoder_sampling_rate == 0.0_16 A = audioldm_pipe(audio_length_in_s=0.0_32 ,**A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) / vocoder_sampling_rate == 0.0_32 def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]: A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = ['hey'] A = audioldm_pipe(A_ ,num_inference_steps=1 ) A = output.audios.shape assert audio_shape == (1, 256) A = audioldm_pipe.vocoder.config config.model_in_dim *= 2 A = SpeechTaHifiGan(A_ ).to(A_ ) A = audioldm_pipe(A_ ,num_inference_steps=1 ) A = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 256) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: self._test_attention_slicing_forward_pass(test_mean_pixel_difference=A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: self._test_inference_batch_single_identical(test_mean_pixel_difference=A_ ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,) def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A_ ) @slow class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]: super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : List[Any] ,A_ : str="cpu" ,A_ : List[str]=torch.floataa ,A_ : str=0 ) -> List[Any]: A = torch.Generator(device=A_ ).manual_seed(A_ ) A = np.random.RandomState(A_ ).standard_normal((1, 8, 128, 16) ) A = torch.from_numpy(A_ ).to(device=A_ ,dtype=A_ ) A = { 'prompt': 'A hammer hitting a wooden surface', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 2.5, } return inputs def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: A = AudioLDMPipeline.from_pretrained('cvssp/audioldm' ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_inputs(A_ ) A = 25 A = audioldm_pipe(**A_ ).audios[0] assert audio.ndim == 1 assert len(A_ ) == 8_1920 A = audio[7_7230:7_7240] A = np.array( [-0.48_84, -0.46_07, 0.00_23, 0.50_07, 0.58_96, 0.51_51, 0.38_13, -0.02_08, -0.36_87, -0.43_15] ) A = np.abs(expected_slice - audio_slice ).max() assert max_diff < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]: A = AudioLDMPipeline.from_pretrained('cvssp/audioldm' ) A = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_inputs(A_ ) A = audioldm_pipe(**A_ ).audios[0] assert audio.ndim == 1 assert len(A_ ) == 8_1920 A = audio[2_7780:2_7790] A = np.array([-0.21_31, -0.08_73, -0.01_24, -0.01_89, 0.05_69, 0.13_73, 0.18_83, 0.28_86, 0.32_97, 0.22_12] ) A = np.abs(expected_slice - audio_slice ).max() assert max_diff < 3e-2
91
1
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING import torch from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = '''dandelin/vilt-b32-finetuned-vqa''' _lowerCamelCase: int = ( '''This is a tool that answers a question about an image. It takes an input named `image` which should be the ''' '''image containing the information, as well as a `question` which should be the question in English. It ''' '''returns a text that is the answer to the question.''' ) _lowerCamelCase: str = '''image_qa''' _lowerCamelCase: int = AutoProcessor _lowerCamelCase: Any = AutoModelForVisualQuestionAnswering _lowerCamelCase: List[Any] = ['''image''', '''text'''] _lowerCamelCase: Union[str, Any] = ['''text'''] def __init__( self : Dict ,*A_ : int ,**A_ : Optional[int] ) -> Optional[int]: requires_backends(self ,['vision'] ) super().__init__(*A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : "Image" ,A_ : str ) -> int: return self.pre_processor(A_ ,A_ ,return_tensors='pt' ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[Any] ) -> Dict: with torch.no_grad(): return self.model(**A_ ).logits def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[int] ) -> Tuple: A = outputs.argmax(-1 ).item() return self.model.config.idalabel[idx]
91
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase = { '''configuration_xlm_roberta_xl''': [ '''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMRobertaXLConfig''', '''XLMRobertaXLOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XLMRobertaXLForCausalLM''', '''XLMRobertaXLForMaskedLM''', '''XLMRobertaXLForMultipleChoice''', '''XLMRobertaXLForQuestionAnswering''', '''XLMRobertaXLForSequenceClassification''', '''XLMRobertaXLForTokenClassification''', '''XLMRobertaXLModel''', '''XLMRobertaXLPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaXLConfig, XLMRobertaXLOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaXLForCausalLM, XLMRobertaXLForMaskedLM, XLMRobertaXLForMultipleChoice, XLMRobertaXLForQuestionAnswering, XLMRobertaXLForSequenceClassification, XLMRobertaXLForTokenClassification, XLMRobertaXLModel, XLMRobertaXLPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
91
1
"""simple docstring""" from queue import PriorityQueue from typing import Any import numpy as np def _snake_case ( snake_case__ : dict , snake_case__ : str , snake_case__ : set , snake_case__ : set , snake_case__ : dict , snake_case__ : dict , snake_case__ : PriorityQueue , snake_case__ : dict , snake_case__ : float | int , ): for nxt, d in graph[v]: if nxt in visited_forward: continue A = cst_fwd.get(snake_case__ , np.inf ) A = cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt) ) A = new_cost_f A = v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: A = cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def _snake_case ( snake_case__ : str , snake_case__ : str , snake_case__ : dict , snake_case__ : dict ): A = -1 A = set() A = set() A = {source: 0} A = {destination: 0} A = {source: None} A = {destination: None} A = PriorityQueue() A = PriorityQueue() A = np.inf queue_forward.put((0, source) ) queue_backward.put((0, destination) ) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): A , A = queue_forward.get() visited_forward.add(snake_case__ ) A , A = queue_backward.get() visited_backward.add(snake_case__ ) A = pass_and_relaxation( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) A = pass_and_relaxation( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if shortest_distance != np.inf: A = shortest_distance return shortest_path_distance _lowercase = { '''B''': [['''C''', 1]], '''C''': [['''D''', 1]], '''D''': [['''F''', 1]], '''E''': [['''B''', 1], ['''G''', 2]], '''F''': [], '''G''': [['''F''', 1]], } _lowercase = { '''B''': [['''E''', 1]], '''C''': [['''B''', 1]], '''D''': [['''C''', 1]], '''F''': [['''D''', 1], ['''G''', 1]], '''E''': [[None, np.inf]], '''G''': [['''E''', 2]], } if __name__ == "__main__": import doctest doctest.testmod()
91
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.utils import ComputeEnvironment from .cluster import get_cluster_input from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 from .sagemaker import get_sagemaker_input _lowercase = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine''' def _snake_case ( ): A = _ask_options( 'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , ) if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: A = get_sagemaker_input() else: A = get_cluster_input() return config def _snake_case ( snake_case__ : Any=None ): if subparsers is not None: A = subparsers.add_parser('config' , description=snake_case__ ) else: A = argparse.ArgumentParser('Accelerate config command' , description=snake_case__ ) parser.add_argument( '--config_file' , default=snake_case__ , help=( 'The path to use to store the config file. Will default to a file named default_config.yaml in the cache ' 'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ' 'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ' 'with \'huggingface\'.' ) , ) if subparsers is not None: parser.set_defaults(func=snake_case__ ) return parser def _snake_case ( snake_case__ : Tuple ): A = get_user_input() if args.config_file is not None: A = args.config_file else: if not os.path.isdir(snake_case__ ): os.makedirs(snake_case__ ) A = default_yaml_config_file if config_file.endswith('.json' ): config.to_json_file(snake_case__ ) else: config.to_yaml_file(snake_case__ ) print(F'accelerate configuration saved at {config_file}' ) def _snake_case ( ): A = config_command_parser() A = parser.parse_args() config_command(snake_case__ ) if __name__ == "__main__": main()
91
1
"""simple docstring""" from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Tuple ,A_ : int ,A_ : List[str]=12 ,A_ : Optional[int]=7 ,A_ : Any=True ,A_ : str=True ,A_ : Union[str, Any]=True ,A_ : Any=99 ,A_ : Optional[int]=32 ,A_ : List[str]=32 ,A_ : Optional[Any]=2 ,A_ : str=4 ,A_ : Optional[int]=37 ,A_ : int=0.1 ,A_ : int=0.1 ,A_ : Optional[int]=512 ,A_ : Optional[Any]=0.02 ,A_ : Optional[int]=0 ,A_ : Tuple=None ,) -> Union[str, Any]: A = parent A = batch_size A = seq_length A = is_training A = use_input_mask A = use_labels A = vocab_size A = hidden_size A = projection_dim A = num_hidden_layers A = num_attention_heads A = intermediate_size A = dropout A = attention_dropout A = max_position_embeddings A = initializer_range A = scope A = bos_token_id def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A = None if self.use_input_mask: A = random_attention_mask([self.batch_size, self.seq_length] ) if input_mask is not None: A = input_mask.numpy() A , A = input_mask.shape A = np.random.randint(1 ,seq_length - 1 ,size=(batch_size,) ) for batch_idx, start_index in enumerate(A_ ): A = 1 A = 0 A = self.get_config() return config, input_ids, tf.convert_to_tensor(A_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any: return BlipTextConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,projection_dim=self.projection_dim ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,dropout=self.dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,bos_token_id=self.bos_token_id ,) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[str, Any] ,A_ : Union[str, Any] ,A_ : Optional[Any] ) -> str: A = TFBlipTextModel(config=A_ ) A = model(A_ ,attention_mask=A_ ,training=A_ ) A = model(A_ ,training=A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: A = self.prepare_config_and_inputs() A , A , A = config_and_inputs A = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: List[str] = (TFBlipTextModel,) if is_tf_available() else () _lowerCamelCase: Optional[int] = False _lowerCamelCase: str = False _lowerCamelCase: Optional[int] = False def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any: A = BlipTextModelTester(self ) A = ConfigTester(self ,config_class=A_ ,hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]: pass def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str: pass @unittest.skip(reason='Blip does not use inputs_embeds' ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: pass @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> str: pass @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]: pass @slow def _SCREAMING_SNAKE_CASE ( self : Any ) -> int: for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = TFBlipTextModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[str]=True ) -> Tuple: super().test_pt_tf_model_equivalence(allow_missing_keys=A_ )
91
"""simple docstring""" import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Tuple ,A_ : Any ,A_ : int=13 ,A_ : str=7 ,A_ : Tuple=True ,A_ : str=True ,A_ : str=False ,A_ : List[str]=True ,A_ : str=99 ,A_ : str=32 ,A_ : Optional[int]=5 ,A_ : Optional[Any]=4 ,A_ : str=37 ,A_ : Optional[Any]="gelu" ,A_ : Union[str, Any]=0.1 ,A_ : Any=0.1 ,A_ : Optional[Any]=512 ,A_ : str=16 ,A_ : int=2 ,A_ : Optional[Any]=0.02 ,A_ : str=3 ,A_ : str=4 ,A_ : List[str]=None ,) -> str: A = parent A = batch_size A = seq_length A = is_training A = use_input_mask A = use_token_type_ids A = use_labels A = vocab_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_vocab_size A = type_sequence_label_size A = initializer_range A = num_labels A = num_choices A = scope def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A = None if self.use_input_mask: A = random_attention_mask([self.batch_size, self.seq_length] ) A = None if self.use_token_type_ids: A = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) A = None A = None A = None if self.use_labels: A = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A = ids_tensor([self.batch_size] ,self.num_choices ) A = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: return LlamaConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=A_ ,initializer_range=self.initializer_range ,) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Dict ,A_ : Optional[int] ,A_ : Any ,A_ : Optional[Any] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Tuple ) -> List[Any]: A = LlamaModel(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ) A = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : int ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Any ,A_ : Tuple ,A_ : Union[str, Any] ,A_ : Dict ,) -> List[str]: A = True A = LlamaModel(A_ ) model.to(A_ ) model.eval() A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,) A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,) A = model(A_ ,attention_mask=A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : List[str] ,A_ : Optional[int] ,A_ : Any ,A_ : str ,A_ : Dict ,A_ : Dict ,A_ : Tuple ,A_ : Tuple ,A_ : Dict ,) -> Union[str, Any]: A = LlamaForCausalLM(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Dict ,A_ : Any ,A_ : int ,A_ : List[str] ,A_ : Tuple ,A_ : Any ,A_ : Union[str, Any] ,A_ : Any ,A_ : int ,) -> List[Any]: A = True A = True A = LlamaForCausalLM(config=A_ ) model.to(A_ ) model.eval() # first forward pass A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,use_cache=A_ ,) A = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A = ids_tensor((self.batch_size, 3) ,config.vocab_size ) A = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and A = torch.cat([input_ids, next_tokens] ,dim=-1 ) A = torch.cat([input_mask, next_mask] ,dim=-1 ) A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,output_hidden_states=A_ ,)['hidden_states'][0] A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,past_key_values=A_ ,output_hidden_states=A_ ,)['hidden_states'][0] # select random slice A = ids_tensor((1,) ,output_from_past.shape[-1] ).item() A = output_from_no_past[:, -3:, random_slice_idx].detach() A = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A_ ,A_ ,atol=1e-3 ) ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]: A = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = config_and_inputs A = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Dict = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () _lowerCamelCase: List[Any] = (LlamaForCausalLM,) if is_torch_available() else () _lowerCamelCase: Any = ( { '''feature-extraction''': LlamaModel, '''text-classification''': LlamaForSequenceClassification, '''text-generation''': LlamaForCausalLM, '''zero-shot''': LlamaForSequenceClassification, } if is_torch_available() else {} ) _lowerCamelCase: int = False _lowerCamelCase: List[str] = False def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: A = LlamaModelTester(self ) A = ConfigTester(self ,config_class=A_ ,hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]: A = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: A = type self.model_tester.create_and_check_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = 3 A = input_dict['input_ids'] A = input_ids.ne(1 ).to(A_ ) A = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) A = LlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = 3 A = 'single_label_classification' A = input_dict['input_ids'] A = input_ids.ne(1 ).to(A_ ) A = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) A = LlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = 3 A = 'multi_label_classification' A = input_dict['input_ids'] A = input_ids.ne(1 ).to(A_ ) A = ids_tensor( [self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float ) A = LlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('LLaMA buffers include complex numbers, which breaks this test' ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: pass @parameterized.expand([('linear',), ('dynamic',)] ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Any ) -> str: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = ids_tensor([1, 10] ,config.vocab_size ) A = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights A = LlamaModel(A_ ) original_model.to(A_ ) original_model.eval() A = original_model(A_ ).last_hidden_state A = original_model(A_ ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights A = {'type': scaling_type, 'factor': 10.0} A = LlamaModel(A_ ) scaled_model.to(A_ ) scaled_model.eval() A = scaled_model(A_ ).last_hidden_state A = scaled_model(A_ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(A_ ,A_ ,atol=1e-5 ) ) else: self.assertFalse(torch.allclose(A_ ,A_ ,atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(A_ ,A_ ,atol=1e-5 ) ) @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: A = [1, 306, 4658, 278, 6593, 310, 2834, 338] A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' ,device_map='auto' ) A = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 A = torch.tensor([[-6.65_50, -4.12_27, -4.98_59, -3.24_06, 0.82_62, -3.00_33, 1.29_64, -3.36_99]] ) torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A = torch.tensor([-12.82_81, -7.44_53, -0.46_39, -8.06_25, -7.25_00, -8.00_00, -6.48_83, -7.76_95, -7.84_38, -7.03_12, -6.21_88, -7.13_28, -1.84_96, 1.99_61, -8.62_50, -6.72_27, -12.82_81, -6.94_92, -7.07_42, -7.78_52, -7.58_20, -7.90_62, -6.93_75, -7.98_05, -8.34_38, -8.15_62, -8.04_69, -7.62_50, -7.74_22, -7.33_98,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,A_ ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def _SCREAMING_SNAKE_CASE ( self : Any ) -> int: A = [1, 306, 4658, 278, 6593, 310, 2834, 338] A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' ,device_map='auto' ) A = model(torch.tensor(A_ ) ) # Expected mean on dim = -1 A = torch.tensor([[-2.06_22, -1.27_94, -1.16_38, -0.97_88, -1.46_03, -1.02_38, -1.78_93, -1.44_11]] ) torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A = torch.tensor([-8.14_06, -8.05_47, 2.74_61, -1.23_44, -0.14_48, -1.82_62, -1.00_20, -1.81_54, -1.68_95, -1.85_16, -2.35_74, -0.92_77, 3.75_98, 6.57_42, -1.29_98, -0.11_77, -8.14_06, -2.96_88, -2.91_99, -3.16_99, -3.52_54, -2.35_55, -2.79_88, -3.41_41, -2.82_62, -4.51_95, -3.33_79, -3.31_64, -2.78_32, -3.02_73] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,A_ ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def _SCREAMING_SNAKE_CASE ( self : int ) -> str: A = [1, 306, 4658, 278, 6593, 310, 2834, 338] A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ,device_map='auto' ) A = model(torch.tensor(A_ ) ) # Expected mean on dim = -1 A = torch.tensor([[-0.85_62, -1.85_20, -0.75_51, -0.41_62, -1.51_61, -1.20_38, -2.48_23, -2.32_54]] ) torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A = torch.tensor([-2.22_27, 4.88_28, 0.90_23, -0.45_78, -0.78_71, -0.10_33, -0.62_21, -0.57_86, -0.78_03, -1.06_74, -1.29_20, -0.15_70, 0.80_08, 2.07_23, -0.94_97, 0.27_71, -2.22_27, -0.76_12, -1.43_46, -1.20_61, -1.64_26, -0.30_00, -0.71_39, -1.19_34, -1.86_91, -1.69_73, -1.59_47, -1.27_05, -0.35_23, -0.55_13] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) @unittest.skip( 'Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test' ) @slow def _SCREAMING_SNAKE_CASE ( self : Any ) -> str: A = [1, 306, 4658, 278, 6593, 310, 2834, 338] A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' ,device_map='auto' ) A = model(torch.tensor(A_ ) ) A = torch.tensor( [[-4.23_27, -3.33_60, -4.66_65, -4.76_31, -1.81_80, -3.41_70, -1.42_11, -3.18_10]] ,dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) # fmt: off A = torch.tensor([-9.49_22, -3.95_51, 1.79_98, -5.67_58, -5.10_55, -5.89_84, -4.83_20, -6.80_86, -6.53_91, -5.61_72, -5.58_20, -5.53_52, 1.78_81, 3.62_89, -6.51_17, -3.47_85, -9.50_00, -6.03_52, -6.81_25, -6.01_95, -6.68_36, -5.47_27, -6.28_12, -6.03_91, -7.33_98, -7.42_97, -7.48_44, -6.58_20, -5.87_89, -5.53_12] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,A_ ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip('Model is curently gated' ) @slow def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple: A = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi' A = 'Simply put, the theory of relativity states that ' A = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ) A = tokenizer.encode(A_ ,return_tensors='pt' ) A = LlamaForCausalLM.from_pretrained( 'meta-llama/Llama-2-13b-chat-hf' ,device_map='sequential' ,use_safetensors=A_ ) # greedy generation outputs A = model.generate(A_ ,max_new_tokens=64 ,top_p=A_ ,temperature=1 ,do_sample=A_ ) A = tokenizer.decode(generated_ids[0] ,skip_special_tokens=A_ ) self.assertEqual(A_ ,A_ )
91
1
"""simple docstring""" class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Dict ) -> Optional[Any]: A = '' A = '' A = [] def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : int ,A_ : int ) -> int: if m == -1: return n + 1 elif n == -1: return m + 1 elif self.dp[m][n] > -1: return self.dp[m][n] else: if self.worda[m] == self.worda[n]: A = self.__min_dist_top_down_dp(m - 1 ,n - 1 ) else: A = self.__min_dist_top_down_dp(A_ ,n - 1 ) A = self.__min_dist_top_down_dp(m - 1 ,A_ ) A = self.__min_dist_top_down_dp(m - 1 ,n - 1 ) A = 1 + min(A_ ,A_ ,A_ ) return self.dp[m][n] def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : str ) -> int: A = worda A = worda A = [[-1 for _ in range(len(A_ ) )] for _ in range(len(A_ ) )] return self.__min_dist_top_down_dp(len(A_ ) - 1 ,len(A_ ) - 1 ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : str ,A_ : str ) -> int: A = worda A = worda A = len(A_ ) A = len(A_ ) A = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )] for i in range(m + 1 ): for j in range(n + 1 ): if i == 0: # first string is empty A = j elif j == 0: # second string is empty A = i elif worda[i - 1] == worda[j - 1]: # last characters are equal A = self.dp[i - 1][j - 1] else: A = self.dp[i][j - 1] A = self.dp[i - 1][j] A = self.dp[i - 1][j - 1] A = 1 + min(A_ ,A_ ,A_ ) return self.dp[m][n] if __name__ == "__main__": _lowercase = EditDistance() print('''****************** Testing Edit Distance DP Algorithm ******************''') print() _lowercase = input('''Enter the first string: ''').strip() _lowercase = input('''Enter the second string: ''').strip() print() print(F"""The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}""") print(F"""The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}""") print() print('''*************** End of Testing Edit Distance DP Algorithm ***************''')
91
"""simple docstring""" import os # Precomputes a list of the 100 first triangular numbers _lowercase = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)] def _snake_case ( ): A = os.path.dirname(os.path.realpath(snake_case__ ) ) A = os.path.join(snake_case__ , 'words.txt' ) A = '' with open(snake_case__ ) as f: A = f.readline() A = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )] A = [ word for word in [sum(ord(snake_case__ ) - 64 for x in word ) for word in words] if word in TRIANGULAR_NUMBERS ] return len(snake_case__ ) if __name__ == "__main__": print(solution())
91
1
"""simple docstring""" from __future__ import annotations def _snake_case ( snake_case__ : list[int] , snake_case__ : list[int] , snake_case__ : list[int] , snake_case__ : list[list[str]] , snake_case__ : int , ): A = len(snake_case__ ) # If row is equal to the size of the board it means there are a queen in each row in # the current board (possible_board) if row == n: # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . '] boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board] ) return # We iterate each column in the row to find all possible results in each row for col in range(snake_case__ ): # We apply that we learned previously. First we check that in the current board # (possible_board) there are not other same value because if there is it means # that there are a collision in vertical. Then we apply the two formulas we # learned before: # # 45º: y - x = b or 45: row - col = b # 135º: y + x = b or row + col = b. # # And we verify if the results of this two formulas not exist in their variables # respectively. (diagonal_right_collisions, diagonal_left_collisions) # # If any or these are True it means there is a collision so we continue to the # next value in the for loop. if ( col in possible_board or row - col in diagonal_right_collisions or row + col in diagonal_left_collisions ): continue # If it is False we call dfs function again and we update the inputs depth_first_search( [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , snake_case__ , snake_case__ , ) def _snake_case ( snake_case__ : int ): A = [] depth_first_search([] , [] , [] , snake_case__ , snake_case__ ) # Print all the boards for board in boards: for column in board: print(snake_case__ ) print('' ) print(len(snake_case__ ) , 'solutions were found.' ) if __name__ == "__main__": import doctest doctest.testmod() n_queens_solution(4)
91
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''', '''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = '''mobilenet_v1''' def __init__( self : Optional[int] ,A_ : Optional[int]=3 ,A_ : Any=224 ,A_ : List[Any]=1.0 ,A_ : Union[str, Any]=8 ,A_ : Union[str, Any]="relu6" ,A_ : Optional[Any]=True ,A_ : List[str]=0.9_99 ,A_ : int=0.02 ,A_ : int=0.0_01 ,**A_ : Union[str, Any] ,) -> Dict: super().__init__(**A_ ) if depth_multiplier <= 0: raise ValueError('depth_multiplier must be greater than zero.' ) A = num_channels A = image_size A = depth_multiplier A = min_depth A = hidden_act A = tf_padding A = classifier_dropout_prob A = initializer_range A = layer_norm_eps class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[int] = version.parse('''1.11''' ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict([('pixel_values', {0: 'batch'})] ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: if self.task == "image-classification": return OrderedDict([('logits', {0: 'batch'})] ) else: return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> float: return 1e-4
91
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase = { '''configuration_xlm_roberta_xl''': [ '''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMRobertaXLConfig''', '''XLMRobertaXLOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XLMRobertaXLForCausalLM''', '''XLMRobertaXLForMaskedLM''', '''XLMRobertaXLForMultipleChoice''', '''XLMRobertaXLForQuestionAnswering''', '''XLMRobertaXLForSequenceClassification''', '''XLMRobertaXLForTokenClassification''', '''XLMRobertaXLModel''', '''XLMRobertaXLPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaXLConfig, XLMRobertaXLOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaXLForCausalLM, XLMRobertaXLForMaskedLM, XLMRobertaXLForMultipleChoice, XLMRobertaXLForQuestionAnswering, XLMRobertaXLForSequenceClassification, XLMRobertaXLForTokenClassification, XLMRobertaXLModel, XLMRobertaXLPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
91
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowercase = { '''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''], '''tokenization_electra''': ['''ElectraTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''ElectraTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ElectraForCausalLM''', '''ElectraForMaskedLM''', '''ElectraForMultipleChoice''', '''ElectraForPreTraining''', '''ElectraForQuestionAnswering''', '''ElectraForSequenceClassification''', '''ElectraForTokenClassification''', '''ElectraModel''', '''ElectraPreTrainedModel''', '''load_tf_weights_in_electra''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFElectraForMaskedLM''', '''TFElectraForMultipleChoice''', '''TFElectraForPreTraining''', '''TFElectraForQuestionAnswering''', '''TFElectraForSequenceClassification''', '''TFElectraForTokenClassification''', '''TFElectraModel''', '''TFElectraPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''FlaxElectraForCausalLM''', '''FlaxElectraForMaskedLM''', '''FlaxElectraForMultipleChoice''', '''FlaxElectraForPreTraining''', '''FlaxElectraForQuestionAnswering''', '''FlaxElectraForSequenceClassification''', '''FlaxElectraForTokenClassification''', '''FlaxElectraModel''', '''FlaxElectraPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
91
1
"""simple docstring""" import math from collections.abc import Callable def _snake_case ( snake_case__ : Callable[[float], float] , snake_case__ : float , snake_case__ : float ): A = xa A = xa while True: if x_n == x_na or function(snake_case__ ) == function(snake_case__ ): raise ZeroDivisionError('float division by zero, could not find root' ) A = x_na - ( function(snake_case__ ) / ((function(snake_case__ ) - function(snake_case__ )) / (x_na - x_n)) ) if abs(x_na - x_na ) < 10**-5: return x_na A = x_na A = x_na def _snake_case ( snake_case__ : float ): return math.pow(snake_case__ , 3 ) - (2 * x) - 5 if __name__ == "__main__": print(intersection(f, 3, 3.5))
91
"""simple docstring""" import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal _lowercase = datasets.utils.logging.get_logger(__name__) _lowercase = ['''names''', '''prefix'''] _lowercase = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols'''] _lowercase = ['''encoding_errors''', '''on_bad_lines'''] _lowercase = ['''date_format'''] @dataclass class lowerCAmelCase_ ( datasets.BuilderConfig ): '''simple docstring''' _lowerCamelCase: str = "," _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[Union[int, List[int], str]] = "infer" _lowerCamelCase: Optional[List[str]] = None _lowerCamelCase: Optional[List[str]] = None _lowerCamelCase: Optional[Union[int, str, List[int], List[str]]] = None _lowerCamelCase: Optional[Union[List[int], List[str]]] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: bool = True _lowerCamelCase: Optional[Literal["c", "python", "pyarrow"]] = None _lowerCamelCase: Dict[Union[int, str], Callable[[Any], Any]] = None _lowerCamelCase: Optional[list] = None _lowerCamelCase: Optional[list] = None _lowerCamelCase: bool = False _lowerCamelCase: Optional[Union[int, List[int]]] = None _lowerCamelCase: Optional[int] = None _lowerCamelCase: Optional[Union[str, List[str]]] = None _lowerCamelCase: bool = True _lowerCamelCase: bool = True _lowerCamelCase: bool = False _lowerCamelCase: bool = True _lowerCamelCase: Optional[str] = None _lowerCamelCase: str = "." _lowerCamelCase: Optional[str] = None _lowerCamelCase: str = '"' _lowerCamelCase: int = 0 _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: bool = True _lowerCamelCase: bool = True _lowerCamelCase: int = 0 _lowerCamelCase: bool = True _lowerCamelCase: bool = False _lowerCamelCase: Optional[str] = None _lowerCamelCase: int = 10000 _lowerCamelCase: Optional[datasets.Features] = None _lowerCamelCase: Optional[str] = "strict" _lowerCamelCase: Literal["error", "warn", "skip"] = "error" _lowerCamelCase: Optional[str] = None def _SCREAMING_SNAKE_CASE ( self : str ) -> Any: if self.delimiter is not None: A = self.delimiter if self.column_names is not None: A = self.column_names @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: A = { 'sep': self.sep, 'header': self.header, 'names': self.names, 'index_col': self.index_col, 'usecols': self.usecols, 'prefix': self.prefix, 'mangle_dupe_cols': self.mangle_dupe_cols, 'engine': self.engine, 'converters': self.converters, 'true_values': self.true_values, 'false_values': self.false_values, 'skipinitialspace': self.skipinitialspace, 'skiprows': self.skiprows, 'nrows': self.nrows, 'na_values': self.na_values, 'keep_default_na': self.keep_default_na, 'na_filter': self.na_filter, 'verbose': self.verbose, 'skip_blank_lines': self.skip_blank_lines, 'thousands': self.thousands, 'decimal': self.decimal, 'lineterminator': self.lineterminator, 'quotechar': self.quotechar, 'quoting': self.quoting, 'escapechar': self.escapechar, 'comment': self.comment, 'encoding': self.encoding, 'dialect': self.dialect, 'error_bad_lines': self.error_bad_lines, 'warn_bad_lines': self.warn_bad_lines, 'skipfooter': self.skipfooter, 'doublequote': self.doublequote, 'memory_map': self.memory_map, 'float_precision': self.float_precision, 'chunksize': self.chunksize, 'encoding_errors': self.encoding_errors, 'on_bad_lines': self.on_bad_lines, 'date_format': self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() ,A_ ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class lowerCAmelCase_ ( datasets.ArrowBasedBuilder ): '''simple docstring''' _lowerCamelCase: Any = CsvConfig def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: return datasets.DatasetInfo(features=self.config.features ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Any ) -> str: if not self.config.data_files: raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' ) A = dl_manager.download_and_extract(self.config.data_files ) if isinstance(A_ ,(str, list, tuple) ): A = data_files if isinstance(A_ ,A_ ): A = [files] A = [dl_manager.iter_files(A_ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={'files': files} )] A = [] for split_name, files in data_files.items(): if isinstance(A_ ,A_ ): A = [files] A = [dl_manager.iter_files(A_ ) for file in files] splits.append(datasets.SplitGenerator(name=A_ ,gen_kwargs={'files': files} ) ) return splits def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : pa.Table ) -> pa.Table: if self.config.features is not None: A = self.config.features.arrow_schema if all(not require_storage_cast(A_ ) for feature in self.config.features.values() ): # cheaper cast A = pa.Table.from_arrays([pa_table[field.name] for field in schema] ,schema=A_ ) else: # more expensive cast; allows str <-> int/float or str to Audio for example A = table_cast(A_ ,A_ ) return pa_table def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[str, Any] ) -> List[Any]: A = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str A = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(A_ ) else object for name, dtype, feature in zip(schema.names ,schema.types ,self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(A_ ) ): A = pd.read_csv(A_ ,iterator=A_ ,dtype=A_ ,**self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(A_ ): A = pa.Table.from_pandas(A_ ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(A_ ) except ValueError as e: logger.error(F'Failed to read file \'{file}\' with error {type(A_ )}: {e}' ) raise
91
1
"""simple docstring""" _lowercase = {'''a''': ['''c''', '''b'''], '''b''': ['''d''', '''e'''], '''c''': [], '''d''': [], '''e''': []} _lowercase = ['''a''', '''b''', '''c''', '''d''', '''e'''] def _snake_case ( snake_case__ : Dict , snake_case__ : str , snake_case__ : Optional[Any] ): A = start # add current to visited visited.append(snake_case__ ) A = edges[current] for neighbor in neighbors: # if neighbor not in visited, visit if neighbor not in visited: A = topological_sort(snake_case__ , snake_case__ , snake_case__ ) # if all neighbors visited add current to sort sort.append(snake_case__ ) # if all vertices haven't been visited select a new one to visit if len(snake_case__ ) != len(snake_case__ ): for vertice in vertices: if vertice not in visited: A = topological_sort(snake_case__ , snake_case__ , snake_case__ ) # return sort return sort if __name__ == "__main__": _lowercase = topological_sort('''a''', [], []) print(sort)
91
"""simple docstring""" from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : Any ,A_ : Callable ,A_ : Optional[Features] = None ,A_ : str = None ,A_ : bool = False ,A_ : bool = False ,A_ : Optional[dict] = None ,A_ : Optional[int] = None ,**A_ : int ,) -> str: super().__init__( features=A_ ,cache_dir=A_ ,keep_in_memory=A_ ,streaming=A_ ,num_proc=A_ ,**A_ ,) A = Generator( cache_dir=A_ ,features=A_ ,generator=A_ ,gen_kwargs=A_ ,**A_ ,) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: # Build iterable dataset if self.streaming: A = self.builder.as_streaming_dataset(split='train' ) # Build regular (map-style) dataset else: A = None A = None A = None A = None self.builder.download_and_prepare( download_config=A_ ,download_mode=A_ ,verification_mode=A_ ,base_path=A_ ,num_proc=self.num_proc ,) A = self.builder.as_dataset( split='train' ,verification_mode=A_ ,in_memory=self.keep_in_memory ) return dataset
91
1
"""simple docstring""" import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: A = 'ylacombe/bark-small' A = tempfile.mkdtemp() A = 'en_speaker_1' A = 'This is a test string' A = 'speaker_embeddings_path.json' A = 'speaker_embeddings' def _SCREAMING_SNAKE_CASE ( self : Any ,**A_ : Any ) -> List[Any]: return AutoTokenizer.from_pretrained(self.checkpoint ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : str ) -> List[str]: shutil.rmtree(self.tmpdirname ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict: A = self.get_tokenizer() A = BarkProcessor(tokenizer=A_ ) processor.save_pretrained(self.tmpdirname ) A = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() ) @slow def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: A = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,) processor.save_pretrained( self.tmpdirname ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,speaker_embeddings_directory=self.speaker_embeddings_directory ,) A = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' ) A = BarkProcessor.from_pretrained( self.tmpdirname ,self.speaker_embeddings_dict_path ,bos_token='(BOS)' ,eos_token='(EOS)' ,) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) def _SCREAMING_SNAKE_CASE ( self : str ) -> str: A = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,) A = 35 A = 2 A = 8 A = { 'semantic_prompt': np.ones(A_ ), 'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ), 'fine_prompt': np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset A = processor(text=self.input_string ,voice_preset=A_ ) A = inputs['history_prompt'] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(A_ ,np.array([] ) ).tolist() ) # test loading voice preset from npz file A = os.path.join(self.tmpdirname ,'file.npz' ) np.savez(A_ ,**A_ ) A = processor(text=self.input_string ,voice_preset=A_ ) A = inputs['history_prompt'] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(A_ ,np.array([] ) ).tolist() ) # test loading voice preset from the hub A = processor(text=self.input_string ,voice_preset=self.voice_preset ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: A = self.get_tokenizer() A = BarkProcessor(tokenizer=A_ ) A = processor(text=self.input_string ) A = tokenizer( self.input_string ,padding='max_length' ,max_length=256 ,add_special_tokens=A_ ,return_attention_mask=A_ ,return_token_type_ids=A_ ,) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key].squeeze().tolist() )
91
"""simple docstring""" from maths.prime_check import is_prime def _snake_case ( snake_case__ : int ): if not isinstance(snake_case__ , snake_case__ ): A = F'Input value of [number={number}] must be an integer' raise TypeError(snake_case__ ) if is_prime(snake_case__ ) and is_prime(number + 2 ): return number + 2 else: return -1 if __name__ == "__main__": import doctest doctest.testmod()
91
1
"""simple docstring""" class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[int] ) -> List[str]: A = 0 A = 0 A = {} def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Optional[Any] ) -> int: if vertex not in self.adjacency: A = {} self.num_vertices += 1 def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Tuple ,A_ : Any ,A_ : str ) -> Optional[Any]: self.add_vertex(A_ ) self.add_vertex(A_ ) if head == tail: return A = weight A = weight def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict: A = self.get_edges() for edge in edges: A , A , A = edge edges.remove((tail, head, weight) ) for i in range(len(A_ ) ): A = list(edges[i] ) edges.sort(key=lambda A_ : e[2] ) for i in range(len(A_ ) - 1 ): if edges[i][2] >= edges[i + 1][2]: A = edges[i][2] + 1 for edge in edges: A , A , A = edge A = weight A = weight def __str__( self : Union[str, Any] ) -> str: A = '' for tail in self.adjacency: for head in self.adjacency[tail]: A = self.adjacency[head][tail] string += F'{head} -> {tail} == {weight}\n' return string.rstrip('\n' ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: A = [] for tail in self.adjacency: for head in self.adjacency[tail]: output.append((tail, head, self.adjacency[head][tail]) ) return output def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: return self.adjacency.keys() @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : Dict=None ,A_ : int=None ) -> Tuple: A = Graph() if vertices is None: A = [] if edges is None: A = [] for vertex in vertices: g.add_vertex(A_ ) for edge in edges: g.add_edge(*A_ ) return g class lowerCAmelCase_ : '''simple docstring''' def __init__( self : str ) -> Union[str, Any]: A = {} A = {} def __len__( self : int ) -> int: return len(self.parent ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Any ) -> Any: if item in self.parent: return self.find(A_ ) A = item A = 0 return item def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple ) -> Tuple: if item not in self.parent: return self.make_set(A_ ) if item != self.parent[item]: A = self.find(self.parent[item] ) return self.parent[item] def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Dict ,A_ : Optional[Any] ) -> List[str]: A = self.find(A_ ) A = self.find(A_ ) if roota == roota: return roota if self.rank[roota] > self.rank[roota]: A = roota return roota if self.rank[roota] < self.rank[roota]: A = roota return roota if self.rank[roota] == self.rank[roota]: self.rank[roota] += 1 A = roota return roota return None @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : Any ) -> Optional[int]: A = graph.num_vertices A = Graph.UnionFind() A = [] while num_components > 1: A = {} for vertex in graph.get_vertices(): A = -1 A = graph.get_edges() for edge in edges: A , A , A = edge edges.remove((tail, head, weight) ) for edge in edges: A , A , A = edge A = union_find.find(A_ ) A = union_find.find(A_ ) if seta != seta: if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: A = [head, tail, weight] if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: A = [head, tail, weight] for vertex in cheap_edge: if cheap_edge[vertex] != -1: A , A , A = cheap_edge[vertex] if union_find.find(A_ ) != union_find.find(A_ ): union_find.union(A_ ,A_ ) mst_edges.append(cheap_edge[vertex] ) A = num_components - 1 A = Graph.build(edges=A_ ) return mst
91
"""simple docstring""" import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Dict = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : List[str]=0 ) -> str: A = floats_tensor((1, 3, 128, 128) ,rng=random.Random(A_ ) ) A = np.random.RandomState(A_ ) A = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'strength': 0.75, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) A = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=A_ ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) # warmup pass to apply optimizations A = pipe(**self.get_dummy_inputs() ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: A = ort.SessionOptions() A = False return options def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) A = init_image.resize((768, 512) ) # using the PNDM scheduler by default A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' ,revision='onnx' ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=A_ ) A = 'A fantasy landscape, trending on artstation' A = np.random.RandomState(0 ) A = pipe( prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=A_ ,output_type='np' ,) A = output.images A = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) A = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) A = init_image.resize((768, 512) ) A = LMSDiscreteScheduler.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,subfolder='scheduler' ,revision='onnx' ) A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,scheduler=A_ ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=A_ ) A = 'A fantasy landscape, trending on artstation' A = np.random.RandomState(0 ) A = pipe( prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=A_ ,output_type='np' ,) A = output.images A = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) A = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
91
1
"""simple docstring""" import gc import unittest import numpy as np import torch from torch.backends.cuda import sdp_kernel from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) from diffusers.utils import randn_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: List[str] = ConsistencyModelPipeline _lowerCamelCase: Optional[int] = UNCONDITIONAL_IMAGE_GENERATION_PARAMS _lowerCamelCase: Any = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS # Override required_optional_params to remove num_images_per_prompt _lowerCamelCase: Any = frozenset( [ '''num_inference_steps''', '''generator''', '''latents''', '''output_type''', '''return_dict''', '''callback''', '''callback_steps''', ] ) @property def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: A = UNetaDModel.from_pretrained( 'diffusers/consistency-models-test' ,subfolder='test_unet' ,) return unet @property def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: A = UNetaDModel.from_pretrained( 'diffusers/consistency-models-test' ,subfolder='test_unet_class_cond' ,) return unet def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[Any]=False ) -> Tuple: if class_cond: A = self.dummy_cond_unet else: A = self.dummy_uncond_unet # Default to CM multistep sampler A = CMStochasticIterativeScheduler( num_train_timesteps=40 ,sigma_min=0.0_02 ,sigma_max=80.0 ,) A = { 'unet': unet, 'scheduler': scheduler, } return components def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Union[str, Any] ,A_ : Dict=0 ) -> int: if str(A_ ).startswith('mps' ): A = torch.manual_seed(A_ ) else: A = torch.Generator(device=A_ ).manual_seed(A_ ) A = { 'batch_size': 1, 'num_inference_steps': None, 'timesteps': [22, 0], 'generator': generator, 'output_type': 'np', } return inputs def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = ConsistencyModelPipeline(**A_ ) A = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = pipe(**A_ ).images assert image.shape == (1, 32, 32, 3) A = image[0, -3:, -3:, -1] A = np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components(class_cond=A_ ) A = ConsistencyModelPipeline(**A_ ) A = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = 0 A = pipe(**A_ ).images assert image.shape == (1, 32, 32, 3) A = image[0, -3:, -3:, -1] A = np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = ConsistencyModelPipeline(**A_ ) A = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = 1 A = None A = pipe(**A_ ).images assert image.shape == (1, 32, 32, 3) A = image[0, -3:, -3:, -1] A = np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components(class_cond=A_ ) A = ConsistencyModelPipeline(**A_ ) A = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = 1 A = None A = 0 A = pipe(**A_ ).images assert image.shape == (1, 32, 32, 3) A = image[0, -3:, -3:, -1] A = np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @slow @require_torch_gpu class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Any ) -> str: super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Any=0 ,A_ : Any=False ,A_ : List[Any]="cpu" ,A_ : str=torch.floataa ,A_ : Any=(1, 3, 64, 64) ) -> Tuple: A = torch.manual_seed(A_ ) A = { 'num_inference_steps': None, 'timesteps': [22, 0], 'class_labels': 0, 'generator': generator, 'output_type': 'np', } if get_fixed_latents: A = self.get_fixed_latents(seed=A_ ,device=A_ ,dtype=A_ ,shape=A_ ) A = latents return inputs def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Dict=0 ,A_ : Any="cpu" ,A_ : List[Any]=torch.floataa ,A_ : List[str]=(1, 3, 64, 64) ) -> List[Any]: if type(A_ ) == str: A = torch.device(A_ ) A = torch.Generator(device=A_ ).manual_seed(A_ ) A = randn_tensor(A_ ,generator=A_ ,device=A_ ,dtype=A_ ) return latents def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]: A = UNetaDModel.from_pretrained('diffusers/consistency_models' ,subfolder='diffusers_cd_imagenet64_l2' ) A = CMStochasticIterativeScheduler( num_train_timesteps=40 ,sigma_min=0.0_02 ,sigma_max=80.0 ,) A = ConsistencyModelPipeline(unet=A_ ,scheduler=A_ ) pipe.to(torch_device=A_ ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_inputs() A = pipe(**A_ ).images assert image.shape == (1, 64, 64, 3) A = image[0, -3:, -3:, -1] A = np.array([0.08_88, 0.08_81, 0.06_66, 0.04_79, 0.02_92, 0.01_95, 0.02_01, 0.01_63, 0.02_54] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]: A = UNetaDModel.from_pretrained('diffusers/consistency_models' ,subfolder='diffusers_cd_imagenet64_l2' ) A = CMStochasticIterativeScheduler( num_train_timesteps=40 ,sigma_min=0.0_02 ,sigma_max=80.0 ,) A = ConsistencyModelPipeline(unet=A_ ,scheduler=A_ ) pipe.to(torch_device=A_ ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_inputs() A = 1 A = None A = pipe(**A_ ).images assert image.shape == (1, 64, 64, 3) A = image[0, -3:, -3:, -1] A = np.array([0.03_40, 0.01_52, 0.00_63, 0.02_67, 0.02_21, 0.01_07, 0.04_16, 0.01_86, 0.02_17] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 @require_torch_a def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]: A = UNetaDModel.from_pretrained('diffusers/consistency_models' ,subfolder='diffusers_cd_imagenet64_l2' ) A = CMStochasticIterativeScheduler( num_train_timesteps=40 ,sigma_min=0.0_02 ,sigma_max=80.0 ,) A = ConsistencyModelPipeline(unet=A_ ,scheduler=A_ ) pipe.to(torch_device=A_ ,torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_inputs(get_fixed_latents=A_ ,device=A_ ) # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=A_ ,enable_math=A_ ,enable_mem_efficient=A_ ): A = pipe(**A_ ).images assert image.shape == (1, 64, 64, 3) A = image[0, -3:, -3:, -1] A = np.array([0.18_75, 0.14_28, 0.12_89, 0.21_51, 0.20_92, 0.14_77, 0.18_77, 0.16_41, 0.13_53] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @require_torch_a def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]: A = UNetaDModel.from_pretrained('diffusers/consistency_models' ,subfolder='diffusers_cd_imagenet64_l2' ) A = CMStochasticIterativeScheduler( num_train_timesteps=40 ,sigma_min=0.0_02 ,sigma_max=80.0 ,) A = ConsistencyModelPipeline(unet=A_ ,scheduler=A_ ) pipe.to(torch_device=A_ ,torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_inputs(get_fixed_latents=A_ ,device=A_ ) A = 1 A = None # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=A_ ,enable_math=A_ ,enable_mem_efficient=A_ ): A = pipe(**A_ ).images assert image.shape == (1, 64, 64, 3) A = image[0, -3:, -3:, -1] A = np.array([0.16_63, 0.19_48, 0.22_75, 0.16_80, 0.12_04, 0.12_45, 0.18_58, 0.13_38, 0.20_95] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
91
"""simple docstring""" from __future__ import annotations def _snake_case ( snake_case__ : tuple[int, int] , snake_case__ : int ): A , A = position A = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] A = [] for position in positions: A , A = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(snake_case__ ) return permissible_positions def _snake_case ( snake_case__ : list[list[int]] ): return not any(elem == 0 for row in board for elem in row ) def _snake_case ( snake_case__ : list[list[int]] , snake_case__ : tuple[int, int] , snake_case__ : int ): if is_complete(snake_case__ ): return True for position in get_valid_pos(snake_case__ , len(snake_case__ ) ): A , A = position if board[y][x] == 0: A = curr + 1 if open_knight_tour_helper(snake_case__ , snake_case__ , curr + 1 ): return True A = 0 return False def _snake_case ( snake_case__ : int ): A = [[0 for i in range(snake_case__ )] for j in range(snake_case__ )] for i in range(snake_case__ ): for j in range(snake_case__ ): A = 1 if open_knight_tour_helper(snake_case__ , (i, j) , 1 ): return board A = 0 A = F'Open Kight Tour cannot be performed on a board of size {n}' raise ValueError(snake_case__ ) if __name__ == "__main__": import doctest doctest.testmod()
91
1
"""simple docstring""" _lowercase = { "km/h": 1.0, "m/s": 3.6, "mph": 1.609_344, "knot": 1.852, } _lowercase = { "km/h": 1.0, "m/s": 0.277_777_778, "mph": 0.621_371_192, "knot": 0.539_956_803, } def _snake_case ( snake_case__ : float , snake_case__ : str , snake_case__ : str ): if unit_to not in speed_chart or unit_from not in speed_chart_inverse: A = ( F'Incorrect \'from_type\' or \'to_type\' value: {unit_from!r}, {unit_to!r}\n' F'Valid values are: {", ".join(snake_case__ )}' ) raise ValueError(snake_case__ ) return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 ) if __name__ == "__main__": import doctest doctest.testmod()
91
"""simple docstring""" import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Optional[int] = BlenderbotSmallTokenizer _lowerCamelCase: List[Any] = False def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: super().setUp() A = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__'] A = dict(zip(A_ ,range(len(A_ ) ) ) ) A = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', ''] A = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'} A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp: fp.write(json.dumps(A_ ) + '\n' ) with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp: fp.write('\n'.join(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,**A_ : Union[str, Any] ) -> Optional[int]: kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Tuple ) -> List[Any]: A = 'adapt act apte' A = 'adapt act apte' return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]: A = BlenderbotSmallTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map ) A = 'adapt act apte' A = ['adapt', 'act', 'ap@@', 'te'] A = tokenizer.tokenize(A_ ) self.assertListEqual(A_ ,A_ ) A = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] A = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: A = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' ) assert tok('sam' ).input_ids == [1384] A = 'I am a small frog.' A = tok([src_text] ,padding=A_ ,truncation=A_ )['input_ids'] A = tok.batch_decode(A_ ,skip_special_tokens=A_ ,clean_up_tokenization_spaces=A_ )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: A = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' ) A = 'I am a small frog .' A = '.' A = tok(A_ )['input_ids'] A = tok(A_ )['input_ids'] assert encoded[-1] == encoded_dot[0]
91
1
"""simple docstring""" from __future__ import annotations from math import pow, sqrt def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float ): if (resistance, reactance, impedance).count(0 ) != 1: raise ValueError('One and only one argument must be 0' ) if resistance == 0: return {"resistance": sqrt(pow(snake_case__ , 2 ) - pow(snake_case__ , 2 ) )} elif reactance == 0: return {"reactance": sqrt(pow(snake_case__ , 2 ) - pow(snake_case__ , 2 ) )} elif impedance == 0: return {"impedance": sqrt(pow(snake_case__ , 2 ) + pow(snake_case__ , 2 ) )} else: raise ValueError('Exactly one argument must be 0' ) if __name__ == "__main__": import doctest doctest.testmod()
91
"""simple docstring""" from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: List[str] = ['''image_processor''', '''tokenizer'''] _lowerCamelCase: Optional[int] = '''Pix2StructImageProcessor''' _lowerCamelCase: Dict = ('''T5Tokenizer''', '''T5TokenizerFast''') def __init__( self : Optional[int] ,A_ : List[str] ,A_ : Optional[int] ) -> int: A = False super().__init__(A_ ,A_ ) def __call__( self : Any ,A_ : List[str]=None ,A_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,A_ : bool = True ,A_ : Union[bool, str, PaddingStrategy] = False ,A_ : Union[bool, str, TruncationStrategy] = None ,A_ : Optional[int] = None ,A_ : Optional[int] = 2048 ,A_ : int = 0 ,A_ : Optional[int] = None ,A_ : Optional[bool] = None ,A_ : bool = False ,A_ : bool = False ,A_ : bool = False ,A_ : bool = False ,A_ : bool = False ,A_ : bool = True ,A_ : Optional[Union[str, TensorType]] = None ,**A_ : Tuple ,) -> BatchEncoding: if images is None and text is None: raise ValueError('You have to specify either images or text.' ) # Get only text if images is None and not self.image_processor.is_vqa: A = self.tokenizer A = self.tokenizer( text=A_ ,add_special_tokens=A_ ,padding=A_ ,truncation=A_ ,max_length=A_ ,stride=A_ ,pad_to_multiple_of=A_ ,return_attention_mask=A_ ,return_overflowing_tokens=A_ ,return_special_tokens_mask=A_ ,return_offsets_mapping=A_ ,return_token_type_ids=A_ ,return_length=A_ ,verbose=A_ ,return_tensors=A_ ,**A_ ,) return text_encoding if not self.image_processor.is_vqa: # add pixel_values A = self.image_processor( A_ ,return_tensors=A_ ,max_patches=A_ ,**A_ ) else: # add pixel_values and bbox A = self.image_processor( A_ ,return_tensors=A_ ,max_patches=A_ ,header_text=A_ ,**A_ ) if text is not None and not self.image_processor.is_vqa: A = self.tokenizer( text=A_ ,add_special_tokens=A_ ,padding=A_ ,truncation=A_ ,max_length=A_ ,stride=A_ ,pad_to_multiple_of=A_ ,return_attention_mask=A_ ,return_overflowing_tokens=A_ ,return_special_tokens_mask=A_ ,return_offsets_mapping=A_ ,return_token_type_ids=A_ ,return_length=A_ ,verbose=A_ ,return_tensors=A_ ,**A_ ,) if "attention_mask" in text_encoding: A = text_encoding.pop('attention_mask' ) if "input_ids" in text_encoding: A = text_encoding.pop('input_ids' ) else: A = None if text_encoding is not None: encoding_image_processor.update(A_ ) return encoding_image_processor def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,*A_ : Optional[Any] ,**A_ : Dict ) -> Union[str, Any]: return self.tokenizer.batch_decode(*A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,*A_ : Tuple ,**A_ : List[str] ) -> Any: return self.tokenizer.decode(*A_ ,**A_ ) @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: A = self.tokenizer.model_input_names A = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
91
1
"""simple docstring""" import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging _lowercase = logging.get_logger(__name__) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Any = ['''input_features''', '''is_longer'''] def __init__( self : Dict ,A_ : Optional[Any]=64 ,A_ : str=4_8000 ,A_ : Dict=480 ,A_ : List[Any]=10 ,A_ : List[str]=1024 ,A_ : Any=0.0 ,A_ : Tuple=False ,A_ : float = 0 ,A_ : float = 1_4000 ,A_ : int = None ,A_ : str = "fusion" ,A_ : str = "repeatpad" ,**A_ : Tuple ,) -> Optional[Any]: super().__init__( feature_size=A_ ,sampling_rate=A_ ,padding_value=A_ ,return_attention_mask=A_ ,**A_ ,) A = top_db A = truncation A = padding A = fft_window_size A = (fft_window_size >> 1) + 1 A = hop_length A = max_length_s A = max_length_s * sampling_rate A = sampling_rate A = frequency_min A = frequency_max A = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=A_ ,min_frequency=A_ ,max_frequency=A_ ,sampling_rate=A_ ,norm=A_ ,mel_scale='htk' ,) A = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=A_ ,min_frequency=A_ ,max_frequency=A_ ,sampling_rate=A_ ,norm='slaney' ,mel_scale='slaney' ,) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict[str, Any]: A = copy.deepcopy(self.__dict__ ) A = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : np.array ,A_ : Optional[np.array] = None ) -> np.ndarray: A = spectrogram( A_ ,window_function(self.fft_window_size ,'hann' ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=A_ ,log_mel='dB' ,) return log_mel_spectrogram.T def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Any ,A_ : str ,A_ : str ) -> List[str]: A = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk A = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk A = [0] # randomly choose index for each part A = np.random.choice(ranges[0] ) A = np.random.choice(ranges[1] ) A = np.random.choice(ranges[2] ) A = mel[idx_front : idx_front + chunk_frames, :] A = mel[idx_middle : idx_middle + chunk_frames, :] A = mel[idx_back : idx_back + chunk_frames, :] A = torch.tensor(mel[None, None, :] ) A = torch.nn.functional.interpolate( A_ ,size=[chunk_frames, 64] ,mode='bilinear' ,align_corners=A_ ) A = mel_shrink[0][0].numpy() A = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 ) return mel_fusion def _SCREAMING_SNAKE_CASE ( self : int ,A_ : np.array ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : int ) -> np.array: if waveform.shape[0] > max_length: if truncation == "rand_trunc": A = True # random crop to max_length (for compatibility) -> this should be handled by self.pad A = len(A_ ) - max_length A = np.random.randint(0 ,overflow + 1 ) A = waveform[idx : idx + max_length] A = self._np_extract_fbank_features(A_ ,self.mel_filters_slaney )[None, :] elif truncation == "fusion": A = self._np_extract_fbank_features(A_ ,self.mel_filters ) A = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed A = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. A = np.stack([mel, mel, mel, mel] ,axis=0 ) A = False else: A = self._random_mel_fusion(A_ ,A_ ,A_ ) A = True else: raise NotImplementedError(F'data_truncating {truncation} not implemented' ) else: A = False # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": A = int(max_length / len(A_ ) ) A = np.stack(np.tile(A_ ,n_repeat + 1 ) )[:max_length] if padding == "repeatpad": A = int(max_length / len(A_ ) ) A = np.stack(np.tile(A_ ,A_ ) ) A = np.pad(A_ ,(0, max_length - waveform.shape[0]) ,mode='constant' ,constant_values=0 ) if truncation == "fusion": A = self._np_extract_fbank_features(A_ ,self.mel_filters ) A = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 ) else: A = self._np_extract_fbank_features(A_ ,self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self : str ,A_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,A_ : str = None ,A_ : Optional[str] = None ,A_ : Optional[int] = None ,A_ : Optional[int] = None ,A_ : Optional[Union[str, TensorType]] = None ,**A_ : Tuple ,) -> BatchFeature: A = truncation if truncation is not None else self.truncation A = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a' F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input' F' was sampled with {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) A = isinstance(A_ ,np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'Only mono-channel audio is supported for input to {self}' ) A = is_batched_numpy or ( isinstance(A_ ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) )) ) if is_batched: A = [np.asarray(A_ ,dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(A_ ,np.ndarray ): A = np.asarray(A_ ,dtype=np.floataa ) elif isinstance(A_ ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): A = raw_speech.astype(np.floataa ) # always return batch if not is_batched: A = [np.asarray(A_ )] # convert to mel spectrogram, truncate and pad if needed. A = [ self._get_input_mel(A_ ,max_length if max_length else self.nb_max_samples ,A_ ,A_ ) for waveform in raw_speech ] A = [] A = [] for mel, longer in padded_inputs: input_mel.append(A_ ) is_longer.append(A_ ) if truncation == "fusion" and sum(A_ ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer A = np.random.randint(0 ,len(A_ ) ) A = True if isinstance(input_mel[0] ,A_ ): A = [np.asarray(A_ ,dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool A = [[longer] for longer in is_longer] A = {'input_features': input_mel, 'is_longer': is_longer} A = BatchFeature(A_ ) if return_tensors is not None: A = input_features.convert_to_tensors(A_ ) return input_features
91
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = '''▁''' _lowercase = {'''vocab_file''': '''sentencepiece.bpe.model'''} _lowercase = { '''vocab_file''': { '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''', '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''', '''xlm-roberta-large-finetuned-conll02-dutch''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll02-spanish''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll03-english''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll03-german''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model''' ), } } _lowercase = { '''xlm-roberta-base''': 5_12, '''xlm-roberta-large''': 5_12, '''xlm-roberta-large-finetuned-conll02-dutch''': 5_12, '''xlm-roberta-large-finetuned-conll02-spanish''': 5_12, '''xlm-roberta-large-finetuned-conll03-english''': 5_12, '''xlm-roberta-large-finetuned-conll03-german''': 5_12, } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = VOCAB_FILES_NAMES _lowerCamelCase: List[str] = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase: Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase: Any = ['''input_ids''', '''attention_mask'''] def __init__( self : Union[str, Any] ,A_ : str ,A_ : str="<s>" ,A_ : Any="</s>" ,A_ : Tuple="</s>" ,A_ : Any="<s>" ,A_ : Optional[Any]="<unk>" ,A_ : int="<pad>" ,A_ : str="<mask>" ,A_ : Optional[Dict[str, Any]] = None ,**A_ : Optional[int] ,) -> None: # Mask token behave like a normal word, i.e. include the space before it A = AddedToken(A_ ,lstrip=A_ ,rstrip=A_ ) if isinstance(A_ ,A_ ) else mask_token A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A_ ,eos_token=A_ ,unk_token=A_ ,sep_token=A_ ,cls_token=A_ ,pad_token=A_ ,mask_token=A_ ,sp_model_kwargs=self.sp_model_kwargs ,**A_ ,) A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(A_ ) ) A = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token A = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab A = 1 A = len(self.sp_model ) + self.fairseq_offset A = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Union[str, Any] ) -> Any: A = self.__dict__.copy() A = None A = self.sp_model.serialized_model_proto() return state def __setstate__( self : str ,A_ : str ) -> Optional[Any]: A = d # for backward compatibility if not hasattr(self ,'sp_model_kwargs' ): A = {} A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A = [self.cls_token_id] A = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[int] ,A_ : Optional[List[int]] = None ,A_ : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A_ ,token_ids_a=A_ ,already_has_special_tokens=A_ ) if token_ids_a is None: return [1] + ([0] * len(A_ )) + [1] return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1] def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]: A = [self.sep_token_id] A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: A = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> List[str]: return self.sp_model.encode(A_ ,out_type=A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[Any] ) -> Tuple: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] A = self.sp_model.PieceToId(A_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> int: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ) -> List[Any]: A = ''.join(A_ ).replace(A_ ,' ' ).strip() return out_string def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(A_ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return A = os.path.join( A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,A_ ) elif not os.path.isfile(self.vocab_file ): with open(A_ ,'wb' ) as fi: A = self.sp_model.serialized_model_proto() fi.write(A_ ) return (out_vocab_file,)
91
1
"""simple docstring""" import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.dataset_dict import IterableDatasetDict from datasets.iterable_dataset import IterableDataset from datasets.load import dataset_module_factory, import_main_class from datasets.utils.file_utils import cached_path _lowercase = [ {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''}, {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''}, {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''}, {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''}, {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''}, {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''}, {'''dataset''': '''snli''', '''config_name''': '''plain_text'''}, {'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''}, {'''dataset''': '''wiki40b''', '''config_name''': '''en'''}, {'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''}, {'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''}, {'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''}, {'''dataset''': '''natural_questions''', '''config_name''': '''default'''}, ] def _snake_case ( snake_case__ : str=True ): if with_config: return [ { "testcase_name": d["dataset"] + "/" + d["config_name"], "dataset": d["dataset"], "config_name": d["config_name"], } for d in DATASETS_ON_HF_GCP ] else: return [ {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP} ] @parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_lowercase ) ) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = None _lowerCamelCase: str = None def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : int ,A_ : Tuple ) -> Any: with TemporaryDirectory() as tmp_dir: A = dataset_module_factory(A_ ,cache_dir=A_ ) A = import_main_class(dataset_module.module_path ,dataset=A_ ) A = builder_cls( cache_dir=A_ ,config_name=A_ ,hash=dataset_module.hash ,) A = '/'.join( [ HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=A_ ).replace(os.sep ,'/' ), config.DATASET_INFO_FILENAME, ] ) A = cached_path(A_ ,cache_dir=A_ ) self.assertTrue(os.path.exists(A_ ) ) @pytest.mark.integration def _snake_case ( snake_case__ : Optional[int] ): A = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple' A = dataset_module_factory('wikipedia' , cache_dir=snake_case__ ) A = import_main_class(dataset_module.module_path ) A = builder_cls( cache_dir=snake_case__ , config_name='20220301.frr' , hash=dataset_module.hash , ) # use the HF cloud storage, not the original download_and_prepare that uses apache-beam A = None builder_instance.download_and_prepare() A = builder_instance.as_dataset() assert ds @pytest.mark.integration def _snake_case ( snake_case__ : List[Any] ): A = dataset_module_factory('wikipedia' , cache_dir=snake_case__ ) A = import_main_class(dataset_module.module_path , dataset=snake_case__ ) A = builder_cls( cache_dir=snake_case__ , config_name='20220301.frr' , hash=dataset_module.hash , ) A = builder_instance.as_streaming_dataset() assert ds assert isinstance(snake_case__ , snake_case__ ) assert "train" in ds assert isinstance(ds['train'] , snake_case__ ) assert next(iter(ds['train'] ) )
91
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase = { '''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''], '''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''], '''processing_mctct''': ['''MCTCTProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MCTCTForCTC''', '''MCTCTModel''', '''MCTCTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
91
1
"""simple docstring""" import json import os import shutil import tempfile from unittest import TestCase from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available if is_torch_available() and is_datasets_available() and is_faiss_available(): from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.tokenization_rag import RagTokenizer @require_faiss @require_torch class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = tempfile.mkdtemp() A = 8 # DPR tok A = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] A = os.path.join(self.tmpdirname ,'dpr_tokenizer' ) os.makedirs(A_ ,exist_ok=A_ ) A = os.path.join(A_ ,DPR_VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) # BART tok A = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] A = dict(zip(A_ ,range(len(A_ ) ) ) ) A = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] A = {'unk_token': '<unk>'} A = os.path.join(self.tmpdirname ,'bart_tokenizer' ) os.makedirs(A_ ,exist_ok=A_ ) A = os.path.join(A_ ,BART_VOCAB_FILES_NAMES['vocab_file'] ) A = os.path.join(A_ ,BART_VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp: fp.write(json.dumps(A_ ) + '\n' ) with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp: fp.write('\n'.join(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> DPRQuestionEncoderTokenizer: return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname ,'dpr_tokenizer' ) ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> BartTokenizer: return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname ,'bart_tokenizer' ) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]: shutil.rmtree(self.tmpdirname ) @require_tokenizers def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: A = os.path.join(self.tmpdirname ,'rag_tokenizer' ) A = RagConfig(question_encoder=DPRConfig().to_dict() ,generator=BartConfig().to_dict() ) A = RagTokenizer(question_encoder=self.get_dpr_tokenizer() ,generator=self.get_bart_tokenizer() ) rag_config.save_pretrained(A_ ) rag_tokenizer.save_pretrained(A_ ) A = RagTokenizer.from_pretrained(A_ ,config=A_ ) self.assertIsInstance(new_rag_tokenizer.question_encoder ,A_ ) self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() ,rag_tokenizer.question_encoder.get_vocab() ) self.assertIsInstance(new_rag_tokenizer.generator ,A_ ) self.assertEqual(new_rag_tokenizer.generator.get_vocab() ,rag_tokenizer.generator.get_vocab() ) @slow def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: A = RagTokenizer.from_pretrained('facebook/rag-token-nq' ) A = [ 'who got the first nobel prize in physics', 'when is the next deadpool movie being released', 'which mode is used for short wave broadcast service', 'who is the owner of reading football club', 'when is the next scandal episode coming out', 'when is the last time the philadelphia won the superbowl', 'what is the most current adobe flash player version', 'how many episodes are there in dragon ball z', 'what is the first step in the evolution of the eye', 'where is gall bladder situated in human body', 'what is the main mineral in lithium batteries', 'who is the president of usa right now', 'where do the greasers live in the outsiders', 'panda is a national animal of which country', 'what is the name of manchester united stadium', ] A = tokenizer(A_ ) self.assertIsNotNone(A_ ) @slow def _SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: A = RagTokenizer.from_pretrained('facebook/rag-sequence-nq' ) A = [ 'who got the first nobel prize in physics', 'when is the next deadpool movie being released', 'which mode is used for short wave broadcast service', 'who is the owner of reading football club', 'when is the next scandal episode coming out', 'when is the last time the philadelphia won the superbowl', 'what is the most current adobe flash player version', 'how many episodes are there in dragon ball z', 'what is the first step in the evolution of the eye', 'where is gall bladder situated in human body', 'what is the main mineral in lithium batteries', 'who is the president of usa right now', 'where do the greasers live in the outsiders', 'panda is a national animal of which country', 'what is the name of manchester united stadium', ] A = tokenizer(A_ ) self.assertIsNotNone(A_ )
91
"""simple docstring""" from torch import nn def _snake_case ( snake_case__ : Union[str, Any] ): if act_fn in ["swish", "silu"]: return nn.SiLU() elif act_fn == "mish": return nn.Mish() elif act_fn == "gelu": return nn.GELU() else: raise ValueError(F'Unsupported activation function: {act_fn}' )
91
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) _lowercase = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''NllbTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''NllbTokenizerFast'''] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb import NllbTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb_fast import NllbTokenizerFast else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
91
"""simple docstring""" import copy import re class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: str = '''hp''' _lowerCamelCase: List[Any] = {} _lowerCamelCase: List[Any] = None @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,A_ : List[str] ,A_ : Optional[Any] ) -> Tuple: A = prefix A = defaults cls.build_naming_info() @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : Any ,A_ : List[Any] ) -> int: if len(A_ ) == 0: return "" A = None if any(char.isdigit() for char in word ): raise Exception(F'Parameters should not contain numbers: \'{word}\' contains a number' ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 ,len(A_ ) + 1 ): A = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: A = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(A_ : Optional[Any] ): A = '' while integer != 0: A = chr(ord('A' ) + integer % 10 ) + s integer //= 10 return s A = 0 while True: A = word + '#' + int_to_alphabetic(A_ ) if sword in info["reverse_short_word"]: continue else: A = sword break A = short_word A = word return short_word @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : List[Any] ,A_ : Union[str, Any] ) -> Union[str, Any]: A = param_name.split('_' ) A = [TrialShortNamer.shortname_for_word(A_ ,A_ ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name A = ['', '_'] for separator in separators: A = separator.join(A_ ) if shortname not in info["reverse_short_param"]: A = shortname A = param_name return shortname return param_name @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : List[Any] ,A_ : Any ) -> Tuple: A = TrialShortNamer.shortname_for_key(A_ ,A_ ) A = short_name A = param_name @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict ) -> List[Any]: if cls.NAMING_INFO is not None: return A = { 'short_word': {}, 'reverse_short_word': {}, 'short_param': {}, 'reverse_short_param': {}, } A = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(A_ ,A_ ) A = info @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,A_ : Union[str, Any] ) -> Union[str, Any]: cls.build_naming_info() assert cls.PREFIX is not None A = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(F'You should provide a default value for the param name {k} with value {v}' ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue A = cls.NAMING_INFO['short_param'][k] if isinstance(A_ ,A_ ): A = 1 if v else 0 A = '' if isinstance(A_ ,(int, float) ) else '-' A = F'{key}{sep}{v}' name.append(A_ ) return "_".join(A_ ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[str] ,A_ : Any ) -> int: A = repr[len(cls.PREFIX ) + 1 :] if repr == "": A = [] else: A = repr.split('_' ) A = {} for value in values: if "-" in value: A , A = value.split('-' ) else: A = re.sub('[0-9.]' ,'' ,A_ ) A = float(re.sub('[^0-9.]' ,'' ,A_ ) ) A = cls.NAMING_INFO['reverse_short_param'][p_k] A = p_v for k in cls.DEFAULTS: if k not in parameters: A = cls.DEFAULTS[k] return parameters
91
1
"""simple docstring""" from __future__ import annotations from decimal import Decimal from math import * # noqa: F403 from sympy import diff def _snake_case ( snake_case__ : str , snake_case__ : float | Decimal , snake_case__ : float = 10**-10 ): A = a while True: A = Decimal(snake_case__ ) - ( Decimal(eval(snake_case__ ) ) / Decimal(eval(str(diff(snake_case__ ) ) ) ) # noqa: S307 ) # This number dictates the accuracy of the answer if abs(eval(snake_case__ ) ) < precision: # noqa: S307 return float(snake_case__ ) # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""") # Find root of polynomial print(F"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}""") # Find Square Root of 5 print(F"""The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}""") # Exponential Roots print(F"""The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}""")
91
"""simple docstring""" import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def _snake_case ( ): with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(snake_case__ ): requests.request('GET' , 'https://huggingface.co' ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request('GET' , 'https://huggingface.co' , timeout=1.0 ) @pytest.mark.integration def _snake_case ( ): with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request('GET' , 'https://huggingface.co' ) def _snake_case ( ): with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(snake_case__ ): http_head('https://huggingface.co' )
91
1
"""simple docstring""" import argparse from pathlib import Path import fairseq import torch from fairseq.models.xmod import XMODModel as FairseqXmodModel from packaging import version from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse('''0.12.2'''): raise Exception('''requires fairseq >= 0.12.2''') if version.parse(fairseq.__version__) > version.parse('''2'''): raise Exception('''requires fairseq < v2''') logging.set_verbosity_info() _lowercase = logging.get_logger(__name__) _lowercase = '''Hello, World!''' _lowercase = '''en_XX''' def _snake_case ( snake_case__ : str , snake_case__ : str , snake_case__ : bool ): A = Path('data_bin' ) A = FairseqXmodModel.from_pretrained( model_name_or_path=str(Path(snake_case__ ).parent ) , checkpoint_file=Path(snake_case__ ).name , _name='xmod_base' , arch='xmod_base' , task='multilingual_masked_lm' , data_name_or_path=str(snake_case__ ) , bpe='sentencepiece' , sentencepiece_model=str(Path(snake_case__ ).parent / 'sentencepiece.bpe.model' ) , src_dict=str(data_dir / 'dict.txt' ) , ) xmod.eval() # disable dropout print(snake_case__ ) A = xmod.model.encoder.sentence_encoder A = XmodConfig( vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , 'bottleneck' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , ) if classification_head: A = xmod.model.classification_heads['mnli'].out_proj.weight.shape[0] print('Our X-MOD config:' , snake_case__ ) A = XmodForSequenceClassification(snake_case__ ) if classification_head else XmodForMaskedLM(snake_case__ ) model.eval() # Now let's copy all the weights. # Embeddings A = xmod_sent_encoder.embed_tokens.weight A = xmod_sent_encoder.embed_positions.weight A = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them. A = xmod_sent_encoder.layernorm_embedding.weight A = xmod_sent_encoder.layernorm_embedding.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer A = model.roberta.encoder.layer[i] A = xmod_sent_encoder.layers[i] # self attention A = layer.attention.self if not ( xmod_layer.self_attn.k_proj.weight.data.shape == xmod_layer.self_attn.q_proj.weight.data.shape == xmod_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ): raise AssertionError('Dimensions of self-attention weights do not match.' ) A = xmod_layer.self_attn.q_proj.weight A = xmod_layer.self_attn.q_proj.bias A = xmod_layer.self_attn.k_proj.weight A = xmod_layer.self_attn.k_proj.bias A = xmod_layer.self_attn.v_proj.weight A = xmod_layer.self_attn.v_proj.bias # self-attention output A = layer.attention.output if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape: raise AssertionError('Dimensions of self-attention output weights do not match.' ) A = xmod_layer.self_attn.out_proj.weight A = xmod_layer.self_attn.out_proj.bias A = xmod_layer.self_attn_layer_norm.weight A = xmod_layer.self_attn_layer_norm.bias # intermediate A = layer.intermediate if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError('Dimensions of intermediate weights do not match.' ) A = xmod_layer.fca.weight A = xmod_layer.fca.bias # output A = layer.output if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError('Dimensions of feed-forward weights do not match.' ) A = xmod_layer.fca.weight A = xmod_layer.fca.bias A = xmod_layer.final_layer_norm.weight A = xmod_layer.final_layer_norm.bias if bert_output.adapter_layer_norm is not None: A = xmod_layer.adapter_layer_norm.weight A = xmod_layer.adapter_layer_norm.bias if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ): raise AssertionError('Lists of language adapters do not match.' ) for lang_code, adapter in xmod_layer.adapter_modules.items(): A = bert_output.adapter_modules[lang_code] A = xmod_layer.adapter_modules[lang_code] A = from_adapter.fca.weight A = from_adapter.fca.bias A = from_adapter.fca.weight A = from_adapter.fca.bias # end of layer if xmod_sent_encoder.layer_norm is not None: A = xmod_sent_encoder.layer_norm.weight A = xmod_sent_encoder.layer_norm.bias if classification_head: A = xmod.model.classification_heads['mnli'].dense.weight A = xmod.model.classification_heads['mnli'].dense.bias A = xmod.model.classification_heads['mnli'].out_proj.weight A = xmod.model.classification_heads['mnli'].out_proj.bias else: # LM Head A = xmod.model.encoder.lm_head.dense.weight A = xmod.model.encoder.lm_head.dense.bias A = xmod.model.encoder.lm_head.layer_norm.weight A = xmod.model.encoder.lm_head.layer_norm.bias A = xmod.model.encoder.lm_head.weight A = xmod.model.encoder.lm_head.bias # Let's check that we get the same results. A = xmod.encode(snake_case__ ).unsqueeze(0 ) # batch of size 1 model.roberta.set_default_language(snake_case__ ) A = model(snake_case__ )[0] if classification_head: A = xmod.model.classification_heads['mnli'](xmod.extract_features(snake_case__ ) ) else: A = xmod.model(snake_case__ , lang_id=[SAMPLE_LANGUAGE] )[0] print(our_output.shape , their_output.shape ) A = torch.max(torch.abs(our_output - their_output ) ).item() print(F'max_absolute_diff = {max_absolute_diff}' ) # ~ 1e-7 A = torch.allclose(snake_case__ , snake_case__ , atol=1e-3 ) print('Do both models output the same tensors?' , '🔥' if success else '💩' ) if not success: raise Exception('Something went wRoNg' ) Path(snake_case__ ).mkdir(parents=snake_case__ , exist_ok=snake_case__ ) print(F'Saving model to {pytorch_dump_folder_path}' ) model.save_pretrained(snake_case__ ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.''' ) _lowercase = parser.parse_args() convert_xmod_checkpoint_to_pytorch( args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
91
"""simple docstring""" import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: List[str] = BioGptTokenizer _lowerCamelCase: Tuple = False def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>', ] A = dict(zip(A_ ,range(len(A_ ) ) ) ) A = ['l o 123', 'lo w 1456', 'e r</w> 1789', ''] A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ) as fp: fp.write(json.dumps(A_ ) ) with open(self.merges_file ,'w' ) as fp: fp.write('\n'.join(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Tuple ) -> int: A = 'lower newer' A = 'lower newer' return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = BioGptTokenizer(self.vocab_file ,self.merges_file ) A = 'lower' A = ['low', 'er</w>'] A = tokenizer.tokenize(A_ ) self.assertListEqual(A_ ,A_ ) A = tokens + ['<unk>'] A = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) ,A_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: A = BioGptTokenizer.from_pretrained('microsoft/biogpt' ) A = tokenizer.encode('sequence builders' ,add_special_tokens=A_ ) A = tokenizer.encode('multi-sequence build' ,add_special_tokens=A_ ) A = tokenizer.build_inputs_with_special_tokens(A_ ) A = tokenizer.build_inputs_with_special_tokens(A_ ,A_ ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
91
1
"""simple docstring""" def _snake_case ( snake_case__ : str , snake_case__ : str ): A = len(snake_case__ ) + 1 A = len(snake_case__ ) + 1 # dp is a 2d matrix where dp[i][j] denotes whether prefix string of # length i of input_string matches with prefix string of length j of # given pattern. # "dp" stands for dynamic programming. A = [[0 for i in range(snake_case__ )] for j in range(snake_case__ )] # since string of zero length match pattern of zero length A = 1 # since pattern of zero length will never match with string of non-zero length for i in range(1 , snake_case__ ): A = 0 # since string of zero length will match with pattern where there # is at least one * alternatively for j in range(1 , snake_case__ ): A = dp[0][j - 2] if pattern[j - 1] == '*' else 0 # now using bottom-up approach to find for all remaining lengths for i in range(1 , snake_case__ ): for j in range(1 , snake_case__ ): if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".": A = dp[i - 1][j - 1] elif pattern[j - 1] == "*": if dp[i][j - 2] == 1: A = 1 elif pattern[j - 2] in (input_string[i - 1], "."): A = dp[i - 1][j] else: A = 0 else: A = 0 return bool(dp[-1][-1] ) if __name__ == "__main__": import doctest doctest.testmod() # inputing the strings # input_string = input("input a string :") # pattern = input("input a pattern :") _lowercase = '''aab''' _lowercase = '''c*a*b''' # using function to check whether given string matches the given pattern if match_pattern(input_string, pattern): print(F"""{input_string} matches the given pattern {pattern}""") else: print(F"""{input_string} does not match with the given pattern {pattern}""")
91
"""simple docstring""" # HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. # # By default it'll use the lowest result as the base line to use as 100% and then compare the rest to # it as can be seen from the table above, but you can also specify which combination is the one to use as # the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0' # # --target-metric-key is there to tell the program which metrics to compare - the different metric keys are # inside output_dir/all_results.json. e.g., to measure eval performance instead of train use: # --target-metric-key eval_samples_per_second # but of course you will need to adjust the --base-cmd value in the example to perform evaluation as # well (as currently it doesn't) # import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers _lowercase = float('''nan''') class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[str] ,A_ : Tuple ) -> Any: A = sys.stdout A = open(A_ ,'a' ) def __getattr__( self : int ,A_ : Optional[Any] ) -> Tuple: return getattr(self.stdout ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ) -> str: self.stdout.write(A_ ) # strip tqdm codes self.file.write(re.sub(R'^.*\r' ,'' ,A_ ,0 ,re.M ) ) def _snake_case ( snake_case__ : Optional[Any]=80 , snake_case__ : List[str]=False ): A = [] # deal with critical env vars A = ['CUDA_VISIBLE_DEVICES'] for key in env_keys: A = os.environ.get(snake_case__ , snake_case__ ) if val is not None: cmd.append(F'{key}={val}' ) # python executable (not always needed if the script is executable) A = sys.executable if full_python_path else sys.executable.split('/' )[-1] cmd.append(snake_case__ ) # now the normal args cmd += list(map(shlex.quote , sys.argv ) ) # split up into up to MAX_WIDTH lines with shell multi-line escapes A = [] A = '' while len(snake_case__ ) > 0: current_line += F'{cmd.pop(0 )} ' if len(snake_case__ ) == 0 or len(snake_case__ ) + len(cmd[0] ) + 1 > max_width - 1: lines.append(snake_case__ ) A = '' return "\\\n".join(snake_case__ ) def _snake_case ( snake_case__ : str , snake_case__ : str ): # unwrap multi-line input A = re.sub(r'[\\\n]+' , ' ' , args.base_cmd ) # remove --output_dir if any and set our own A = re.sub('--output_dir\s+[^\s]+' , '' , args.base_cmd ) args.base_cmd += F' --output_dir {output_dir}' # ensure we have --overwrite_output_dir A = re.sub('--overwrite_output_dir\s+' , '' , args.base_cmd ) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd ) def _snake_case ( snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : Any , snake_case__ : List[Any] ): # Enable to debug everything but the run itself, to do it fast and see the progress. # This is useful for debugging the output formatting quickly - we can remove it later once # everybody is happy with the output if 0: import random from time import sleep sleep(0 ) return dict( {k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222] )} , ) A = subprocess.run(snake_case__ , capture_output=snake_case__ , text=snake_case__ ) if verbose: print('STDOUT' , result.stdout ) print('STDERR' , result.stderr ) # save the streams A = variation.replace(' ' , '-' ) with open(Path(snake_case__ ) / F'log.{prefix}.stdout.txt' , 'w' ) as f: f.write(result.stdout ) with open(Path(snake_case__ ) / F'log.{prefix}.stderr.txt' , 'w' ) as f: f.write(result.stderr ) if result.returncode != 0: if verbose: print('failed' ) return {target_metric_key: nan} with io.open(F'{output_dir}/all_results.json' , 'r' , encoding='utf-8' ) as f: A = json.load(snake_case__ ) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def _snake_case ( snake_case__ : str , snake_case__ : str , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : Optional[Any] , ): A = [] A = [] A = F'{id}: {variation:<{longest_variation_len}}' A = F'{preamble}: ' A = set(report_metric_keys + [target_metric_key] ) for i in tqdm(range(snake_case__ ) , desc=snake_case__ , leave=snake_case__ ): A = process_run_single( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) A = single_run_metrics[target_metric_key] if not math.isnan(snake_case__ ): metrics.append(snake_case__ ) results.append(snake_case__ ) outcome += "✓" else: outcome += "✘" A = F'\33[2K\r{outcome}' if len(snake_case__ ) > 0: A = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()} A = round(mean_metrics[target_metric_key] , 2 ) A = F'{outcome} {mean_target}' if len(snake_case__ ) > 1: results_str += F' {tuple(round(snake_case__ , 2 ) for x in results )}' print(snake_case__ ) A = variation return mean_metrics else: print(snake_case__ ) return {variation_key: variation, target_metric_key: nan} def _snake_case ( ): A = torch.cuda.get_device_properties(torch.device('cuda' ) ) return F'\nDatetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n' def _snake_case ( snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Union[str, Any] ): A = pd.DataFrame(snake_case__ ) A = 'variation' A = 'diff_%' A = nan if base_variation is not None and len(df[df[variation_key] == base_variation] ): # this may still return nan A = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(snake_case__ ): # as a fallback, use the minimal value as the sentinel A = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(snake_case__ ): A = df.apply( lambda snake_case__ : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value ) if not math.isnan(r[target_metric_key] ) else 0 , axis='columns' , ) # re-order columns A = [variation_key, target_metric_key, diff_key, *report_metric_keys] A = df.reindex(snake_case__ , axis='columns' ) # reorder cols # capitalize A = df.rename(str.capitalize , axis='columns' ) # make the cols as narrow as possible A = df.rename(lambda snake_case__ : c.replace('_' , '<br>' ) , axis='columns' ) A = df.rename(lambda snake_case__ : c.replace('_' , '\n' ) , axis='columns' ) A = ['', 'Copy between the cut-here-lines and paste as is to github or a forum'] report += ["----------8<-----------------8<--------"] report += ["*** Results:", df_github.to_markdown(index=snake_case__ , floatfmt='.2f' )] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=snake_case__ , floatfmt='.2f' )] print('\n\n'.join(snake_case__ ) ) def _snake_case ( ): A = argparse.ArgumentParser() parser.add_argument( '--base-cmd' , default=snake_case__ , type=snake_case__ , required=snake_case__ , help='Base cmd' , ) parser.add_argument( '--variations' , default=snake_case__ , type=snake_case__ , nargs='+' , required=snake_case__ , help='Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'' , ) parser.add_argument( '--base-variation' , default=snake_case__ , type=snake_case__ , help='Baseline variation to compare to. if None the minimal target value will be used to compare against' , ) parser.add_argument( '--target-metric-key' , default=snake_case__ , type=snake_case__ , required=snake_case__ , help='Target metric key in output_dir/all_results.json, e.g., train_samples_per_second' , ) parser.add_argument( '--report-metric-keys' , default='' , type=snake_case__ , help='Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples' , ) parser.add_argument( '--repeat-times' , default=1 , type=snake_case__ , help='How many times to re-run each variation - an average will be reported' , ) parser.add_argument( '--output_dir' , default='output_benchmark' , type=snake_case__ , help='The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked' , ) parser.add_argument( '--verbose' , default=snake_case__ , action='store_true' , help='Whether to show the outputs of each run or just the benchmark progress' , ) A = parser.parse_args() A = args.output_dir Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) A = get_base_command(snake_case__ , snake_case__ ) # split each dimension into its --foo variations A = [list(map(str.strip , re.split(r'\|' , snake_case__ ) ) ) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty A = list(map(str.strip , map(' '.join , itertools.product(*snake_case__ ) ) ) ) A = max(len(snake_case__ ) for x in variations ) # split wanted keys A = args.report_metric_keys.split() # capture prints into a log file for convenience A = F'benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt' print(F'\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt' ) print(F'and this script\'s output is also piped into {report_fn}' ) A = Tee(snake_case__ ) print(F'\n*** Running {len(snake_case__ )} benchmarks:' ) print(F'Base command: {" ".join(snake_case__ )}' ) A = 'variation' A = [] for id, variation in enumerate(tqdm(snake_case__ , desc='Total completion: ' , leave=snake_case__ ) ): A = base_cmd + variation.split() results.append( process_run( id + 1 , snake_case__ , snake_case__ , snake_case__ , snake_case__ , args.target_metric_key , snake_case__ , args.repeat_times , snake_case__ , args.verbose , ) ) process_results(snake_case__ , args.target_metric_key , snake_case__ , args.base_variation , snake_case__ ) if __name__ == "__main__": main()
91
1
"""simple docstring""" import random class lowerCAmelCase_ : '''simple docstring''' @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : str ) -> tuple[list[int], list[int]]: A = [ord(A_ ) for i in text] A = [] A = [] for i in plain: A = random.randint(1 ,300 ) A = (i + k) * k cipher.append(A_ ) key.append(A_ ) return cipher, key @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : list[int] ,A_ : list[int] ) -> str: A = [] for i in range(len(A_ ) ): A = int((cipher[i] - (key[i]) ** 2) / key[i] ) plain.append(chr(A_ ) ) return "".join(A_ ) if __name__ == "__main__": _lowercase , _lowercase = Onepad().encrypt('''Hello''') print(c, k) print(Onepad().decrypt(c, k))
91
"""simple docstring""" import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version('''>=''', FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType _lowercase = get_logger(__name__) def _snake_case ( snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : int , snake_case__ : str=0 ): os.makedirs(snake_case__ , exist_ok=snake_case__ ) with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): A = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: A = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin' A = os.path.join(snake_case__ , snake_case__ ) if accelerator.process_index == 0: logger.info(F'Saving model to {output_model_file}' ) torch.save(snake_case__ , snake_case__ ) logger.info(F'Model saved to {output_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: A = ( F'{MODEL_NAME}_rank{accelerator.process_index}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Saving model to {output_model_file}' ) torch.save(snake_case__ , snake_case__ ) logger.info(F'Model saved to {output_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: A = os.path.join(snake_case__ , F'{MODEL_NAME}_{model_index}' ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) logger.info(F'Saving model to {ckpt_dir}' ) A = {'model': state_dict} dist_cp.save_state_dict( state_dict=snake_case__ , storage_writer=dist_cp.FileSystemWriter(snake_case__ ) , planner=DefaultSavePlanner() , ) logger.info(F'Model saved to {ckpt_dir}' ) def _snake_case ( snake_case__ : int , snake_case__ : List[str] , snake_case__ : str , snake_case__ : str , snake_case__ : Any=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(snake_case__ ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( 'Set the `sync_module_states` flag to `True` so that model states are synced across processes when ' 'initializing FSDP object' ) return A = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin' A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Loading model from {input_model_file}' ) A = torch.load(snake_case__ ) logger.info(F'Model loaded from {input_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: A = ( F'{MODEL_NAME}_rank{accelerator.process_index}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Loading model from {input_model_file}' ) A = torch.load(snake_case__ ) logger.info(F'Model loaded from {input_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: A = ( os.path.join(snake_case__ , F'{MODEL_NAME}_{model_index}' ) if F'{MODEL_NAME}' not in input_dir else input_dir ) logger.info(F'Loading model from {ckpt_dir}' ) A = {'model': model.state_dict()} dist_cp.load_state_dict( state_dict=snake_case__ , storage_reader=dist_cp.FileSystemReader(snake_case__ ) , planner=DefaultLoadPlanner() , ) A = state_dict['model'] logger.info(F'Model loaded from {ckpt_dir}' ) model.load_state_dict(snake_case__ ) def _snake_case ( snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : Any=0 ): os.makedirs(snake_case__ , exist_ok=snake_case__ ) with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): A = FSDP.optim_state_dict(snake_case__ , snake_case__ ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: A = ( F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Saving Optimizer state to {output_optimizer_file}' ) torch.save(snake_case__ , snake_case__ ) logger.info(F'Optimizer state saved in {output_optimizer_file}' ) else: A = os.path.join(snake_case__ , F'{OPTIMIZER_NAME}_{optimizer_index}' ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) logger.info(F'Saving Optimizer state to {ckpt_dir}' ) dist_cp.save_state_dict( state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(snake_case__ ) , planner=DefaultSavePlanner() , ) logger.info(F'Optimizer state saved in {ckpt_dir}' ) def _snake_case ( snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Optional[int]=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: A = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: A = ( F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Loading Optimizer state from {input_optimizer_file}' ) A = torch.load(snake_case__ ) logger.info(F'Optimizer state loaded from {input_optimizer_file}' ) else: A = ( os.path.join(snake_case__ , F'{OPTIMIZER_NAME}_{optimizer_index}' ) if F'{OPTIMIZER_NAME}' not in input_dir else input_dir ) logger.info(F'Loading Optimizer from {ckpt_dir}' ) A = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(snake_case__ ) , ) A = optim_state['optimizer'] logger.info(F'Optimizer loaded from {ckpt_dir}' ) A = FSDP.optim_state_dict_to_load(snake_case__ , snake_case__ , snake_case__ ) optimizer.load_state_dict(snake_case__ )
91
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''', '''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = '''mobilenet_v1''' def __init__( self : Optional[int] ,A_ : Optional[int]=3 ,A_ : Any=224 ,A_ : List[Any]=1.0 ,A_ : Union[str, Any]=8 ,A_ : Union[str, Any]="relu6" ,A_ : Optional[Any]=True ,A_ : List[str]=0.9_99 ,A_ : int=0.02 ,A_ : int=0.0_01 ,**A_ : Union[str, Any] ,) -> Dict: super().__init__(**A_ ) if depth_multiplier <= 0: raise ValueError('depth_multiplier must be greater than zero.' ) A = num_channels A = image_size A = depth_multiplier A = min_depth A = hidden_act A = tf_padding A = classifier_dropout_prob A = initializer_range A = layer_norm_eps class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[int] = version.parse('''1.11''' ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict([('pixel_values', {0: 'batch'})] ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: if self.task == "image-classification": return OrderedDict([('logits', {0: 'batch'})] ) else: return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> float: return 1e-4
91
"""simple docstring""" import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechTaHifiGan, SpeechTaHifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: str = AudioLDMPipeline _lowerCamelCase: Optional[int] = TEXT_TO_AUDIO_PARAMS _lowerCamelCase: Optional[int] = TEXT_TO_AUDIO_BATCH_PARAMS _lowerCamelCase: Optional[int] = frozenset( [ '''num_inference_steps''', '''num_waveforms_per_prompt''', '''generator''', '''latents''', '''output_type''', '''return_dict''', '''callback''', '''callback_steps''', ] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: torch.manual_seed(0 ) A = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=(32, 64) ,class_embed_type='simple_projection' ,projection_class_embeddings_input_dim=32 ,class_embeddings_concat=A_ ,) A = DDIMScheduler( beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule='scaled_linear' ,clip_sample=A_ ,set_alpha_to_one=A_ ,) torch.manual_seed(0 ) A = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=1 ,out_channels=1 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,) torch.manual_seed(0 ) A = ClapTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,projection_dim=32 ,) A = ClapTextModelWithProjection(A_ ) A = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' ,model_max_length=77 ) A = SpeechTaHifiGanConfig( model_in_dim=8 ,sampling_rate=1_6000 ,upsample_initial_channel=16 ,upsample_rates=[2, 2] ,upsample_kernel_sizes=[4, 4] ,resblock_kernel_sizes=[3, 7] ,resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] ,normalize_before=A_ ,) A = SpeechTaHifiGan(A_ ) A = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'vocoder': vocoder, } return components def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Any ,A_ : Dict=0 ) -> str: if str(A_ ).startswith('mps' ): A = torch.manual_seed(A_ ) else: A = torch.Generator(device=A_ ).manual_seed(A_ ) A = { 'prompt': 'A hammer hitting a wooden surface', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, } return inputs def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = audioldm_pipe(**A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) == 256 A = audio[:10] A = np.array( [-0.00_50, 0.00_50, -0.00_60, 0.00_33, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_33] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = 3 * [inputs['prompt']] # forward A = audioldm_pipe(**A_ ) A = output.audios[0] A = self.get_dummy_inputs(A_ ) A = 3 * [inputs.pop('prompt' )] A = audioldm_pipe.tokenizer( A_ ,padding='max_length' ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=A_ ,return_tensors='pt' ,) A = text_inputs['input_ids'].to(A_ ) A = audioldm_pipe.text_encoder( A_ ,) A = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state A = F.normalize(A_ ,dim=-1 ) A = prompt_embeds # forward A = audioldm_pipe(**A_ ) A = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any: A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = 3 * ['this is a negative prompt'] A = negative_prompt A = 3 * [inputs['prompt']] # forward A = audioldm_pipe(**A_ ) A = output.audios[0] A = self.get_dummy_inputs(A_ ) A = 3 * [inputs.pop('prompt' )] A = [] for p in [prompt, negative_prompt]: A = audioldm_pipe.tokenizer( A_ ,padding='max_length' ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=A_ ,return_tensors='pt' ,) A = text_inputs['input_ids'].to(A_ ) A = audioldm_pipe.text_encoder( A_ ,) A = text_embeds.text_embeds # additional L_2 normalization over each hidden-state A = F.normalize(A_ ,dim=-1 ) embeds.append(A_ ) A , A = embeds # forward A = audioldm_pipe(**A_ ) A = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : str ) -> int: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = PNDMScheduler(skip_prk_steps=A_ ) A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = 'egg cracking' A = audioldm_pipe(**A_ ,negative_prompt=A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) == 256 A = audio[:10] A = np.array( [-0.00_51, 0.00_50, -0.00_60, 0.00_34, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_32] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = PNDMScheduler(skip_prk_steps=A_ ) A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = 'A hammer hitting a wooden surface' # test num_waveforms_per_prompt=1 (default) A = audioldm_pipe(A_ ,num_inference_steps=2 ).audios assert audios.shape == (1, 256) # test num_waveforms_per_prompt=1 (default) for batch of prompts A = 2 A = audioldm_pipe([prompt] * batch_size ,num_inference_steps=2 ).audios assert audios.shape == (batch_size, 256) # test num_waveforms_per_prompt for single prompt A = 2 A = audioldm_pipe(A_ ,num_inference_steps=2 ,num_waveforms_per_prompt=A_ ).audios assert audios.shape == (num_waveforms_per_prompt, 256) # test num_waveforms_per_prompt for batch of prompts A = 2 A = audioldm_pipe( [prompt] * batch_size ,num_inference_steps=2 ,num_waveforms_per_prompt=A_ ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = audioldm_pipe.vocoder.config.sampling_rate A = self.get_dummy_inputs(A_ ) A = audioldm_pipe(audio_length_in_s=0.0_16 ,**A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) / vocoder_sampling_rate == 0.0_16 A = audioldm_pipe(audio_length_in_s=0.0_32 ,**A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) / vocoder_sampling_rate == 0.0_32 def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]: A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = ['hey'] A = audioldm_pipe(A_ ,num_inference_steps=1 ) A = output.audios.shape assert audio_shape == (1, 256) A = audioldm_pipe.vocoder.config config.model_in_dim *= 2 A = SpeechTaHifiGan(A_ ).to(A_ ) A = audioldm_pipe(A_ ,num_inference_steps=1 ) A = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 256) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: self._test_attention_slicing_forward_pass(test_mean_pixel_difference=A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: self._test_inference_batch_single_identical(test_mean_pixel_difference=A_ ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,) def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A_ ) @slow class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]: super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : List[Any] ,A_ : str="cpu" ,A_ : List[str]=torch.floataa ,A_ : str=0 ) -> List[Any]: A = torch.Generator(device=A_ ).manual_seed(A_ ) A = np.random.RandomState(A_ ).standard_normal((1, 8, 128, 16) ) A = torch.from_numpy(A_ ).to(device=A_ ,dtype=A_ ) A = { 'prompt': 'A hammer hitting a wooden surface', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 2.5, } return inputs def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: A = AudioLDMPipeline.from_pretrained('cvssp/audioldm' ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_inputs(A_ ) A = 25 A = audioldm_pipe(**A_ ).audios[0] assert audio.ndim == 1 assert len(A_ ) == 8_1920 A = audio[7_7230:7_7240] A = np.array( [-0.48_84, -0.46_07, 0.00_23, 0.50_07, 0.58_96, 0.51_51, 0.38_13, -0.02_08, -0.36_87, -0.43_15] ) A = np.abs(expected_slice - audio_slice ).max() assert max_diff < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]: A = AudioLDMPipeline.from_pretrained('cvssp/audioldm' ) A = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_inputs(A_ ) A = audioldm_pipe(**A_ ).audios[0] assert audio.ndim == 1 assert len(A_ ) == 8_1920 A = audio[2_7780:2_7790] A = np.array([-0.21_31, -0.08_73, -0.01_24, -0.01_89, 0.05_69, 0.13_73, 0.18_83, 0.28_86, 0.32_97, 0.22_12] ) A = np.abs(expected_slice - audio_slice ).max() assert max_diff < 3e-2
91
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _lowercase = { '''configuration_groupvit''': [ '''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GroupViTConfig''', '''GroupViTOnnxConfig''', '''GroupViTTextConfig''', '''GroupViTVisionConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GroupViTModel''', '''GroupViTPreTrainedModel''', '''GroupViTTextModel''', '''GroupViTVisionModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFGroupViTModel''', '''TFGroupViTPreTrainedModel''', '''TFGroupViTTextModel''', '''TFGroupViTVisionModel''', ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
91
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase = { '''configuration_xlm_roberta_xl''': [ '''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMRobertaXLConfig''', '''XLMRobertaXLOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XLMRobertaXLForCausalLM''', '''XLMRobertaXLForMaskedLM''', '''XLMRobertaXLForMultipleChoice''', '''XLMRobertaXLForQuestionAnswering''', '''XLMRobertaXLForSequenceClassification''', '''XLMRobertaXLForTokenClassification''', '''XLMRobertaXLModel''', '''XLMRobertaXLPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaXLConfig, XLMRobertaXLOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaXLForCausalLM, XLMRobertaXLForMaskedLM, XLMRobertaXLForMultipleChoice, XLMRobertaXLForQuestionAnswering, XLMRobertaXLForSequenceClassification, XLMRobertaXLForTokenClassification, XLMRobertaXLModel, XLMRobertaXLPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
91
1
"""simple docstring""" def _snake_case ( snake_case__ : int ): if number < 0: raise ValueError('number must not be negative' ) return number & (number - 1) == 0 if __name__ == "__main__": import doctest doctest.testmod()
91
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.utils import ComputeEnvironment from .cluster import get_cluster_input from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 from .sagemaker import get_sagemaker_input _lowercase = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine''' def _snake_case ( ): A = _ask_options( 'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , ) if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: A = get_sagemaker_input() else: A = get_cluster_input() return config def _snake_case ( snake_case__ : Any=None ): if subparsers is not None: A = subparsers.add_parser('config' , description=snake_case__ ) else: A = argparse.ArgumentParser('Accelerate config command' , description=snake_case__ ) parser.add_argument( '--config_file' , default=snake_case__ , help=( 'The path to use to store the config file. Will default to a file named default_config.yaml in the cache ' 'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ' 'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ' 'with \'huggingface\'.' ) , ) if subparsers is not None: parser.set_defaults(func=snake_case__ ) return parser def _snake_case ( snake_case__ : Tuple ): A = get_user_input() if args.config_file is not None: A = args.config_file else: if not os.path.isdir(snake_case__ ): os.makedirs(snake_case__ ) A = default_yaml_config_file if config_file.endswith('.json' ): config.to_json_file(snake_case__ ) else: config.to_yaml_file(snake_case__ ) print(F'accelerate configuration saved at {config_file}' ) def _snake_case ( ): A = config_command_parser() A = parser.parse_args() config_command(snake_case__ ) if __name__ == "__main__": main()
91
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase = { '''configuration_luke''': ['''LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LukeConfig'''], '''tokenization_luke''': ['''LukeTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''LUKE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LukeForEntityClassification''', '''LukeForEntityPairClassification''', '''LukeForEntitySpanClassification''', '''LukeForMultipleChoice''', '''LukeForQuestionAnswering''', '''LukeForSequenceClassification''', '''LukeForTokenClassification''', '''LukeForMaskedLM''', '''LukeModel''', '''LukePreTrainedModel''', ] if TYPE_CHECKING: from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig from .tokenization_luke import LukeTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_luke import ( LUKE_PRETRAINED_MODEL_ARCHIVE_LIST, LukeForEntityClassification, LukeForEntityPairClassification, LukeForEntitySpanClassification, LukeForMaskedLM, LukeForMultipleChoice, LukeForQuestionAnswering, LukeForSequenceClassification, LukeForTokenClassification, LukeModel, LukePreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
91
"""simple docstring""" import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Tuple ,A_ : Any ,A_ : int=13 ,A_ : str=7 ,A_ : Tuple=True ,A_ : str=True ,A_ : str=False ,A_ : List[str]=True ,A_ : str=99 ,A_ : str=32 ,A_ : Optional[int]=5 ,A_ : Optional[Any]=4 ,A_ : str=37 ,A_ : Optional[Any]="gelu" ,A_ : Union[str, Any]=0.1 ,A_ : Any=0.1 ,A_ : Optional[Any]=512 ,A_ : str=16 ,A_ : int=2 ,A_ : Optional[Any]=0.02 ,A_ : str=3 ,A_ : str=4 ,A_ : List[str]=None ,) -> str: A = parent A = batch_size A = seq_length A = is_training A = use_input_mask A = use_token_type_ids A = use_labels A = vocab_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_vocab_size A = type_sequence_label_size A = initializer_range A = num_labels A = num_choices A = scope def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A = None if self.use_input_mask: A = random_attention_mask([self.batch_size, self.seq_length] ) A = None if self.use_token_type_ids: A = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) A = None A = None A = None if self.use_labels: A = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A = ids_tensor([self.batch_size] ,self.num_choices ) A = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: return LlamaConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=A_ ,initializer_range=self.initializer_range ,) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Dict ,A_ : Optional[int] ,A_ : Any ,A_ : Optional[Any] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Tuple ) -> List[Any]: A = LlamaModel(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ) A = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : int ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Any ,A_ : Tuple ,A_ : Union[str, Any] ,A_ : Dict ,) -> List[str]: A = True A = LlamaModel(A_ ) model.to(A_ ) model.eval() A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,) A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,) A = model(A_ ,attention_mask=A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : List[str] ,A_ : Optional[int] ,A_ : Any ,A_ : str ,A_ : Dict ,A_ : Dict ,A_ : Tuple ,A_ : Tuple ,A_ : Dict ,) -> Union[str, Any]: A = LlamaForCausalLM(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Dict ,A_ : Any ,A_ : int ,A_ : List[str] ,A_ : Tuple ,A_ : Any ,A_ : Union[str, Any] ,A_ : Any ,A_ : int ,) -> List[Any]: A = True A = True A = LlamaForCausalLM(config=A_ ) model.to(A_ ) model.eval() # first forward pass A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,use_cache=A_ ,) A = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A = ids_tensor((self.batch_size, 3) ,config.vocab_size ) A = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and A = torch.cat([input_ids, next_tokens] ,dim=-1 ) A = torch.cat([input_mask, next_mask] ,dim=-1 ) A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,output_hidden_states=A_ ,)['hidden_states'][0] A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,past_key_values=A_ ,output_hidden_states=A_ ,)['hidden_states'][0] # select random slice A = ids_tensor((1,) ,output_from_past.shape[-1] ).item() A = output_from_no_past[:, -3:, random_slice_idx].detach() A = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A_ ,A_ ,atol=1e-3 ) ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]: A = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = config_and_inputs A = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Dict = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () _lowerCamelCase: List[Any] = (LlamaForCausalLM,) if is_torch_available() else () _lowerCamelCase: Any = ( { '''feature-extraction''': LlamaModel, '''text-classification''': LlamaForSequenceClassification, '''text-generation''': LlamaForCausalLM, '''zero-shot''': LlamaForSequenceClassification, } if is_torch_available() else {} ) _lowerCamelCase: int = False _lowerCamelCase: List[str] = False def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: A = LlamaModelTester(self ) A = ConfigTester(self ,config_class=A_ ,hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]: A = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: A = type self.model_tester.create_and_check_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = 3 A = input_dict['input_ids'] A = input_ids.ne(1 ).to(A_ ) A = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) A = LlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = 3 A = 'single_label_classification' A = input_dict['input_ids'] A = input_ids.ne(1 ).to(A_ ) A = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) A = LlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = 3 A = 'multi_label_classification' A = input_dict['input_ids'] A = input_ids.ne(1 ).to(A_ ) A = ids_tensor( [self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float ) A = LlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('LLaMA buffers include complex numbers, which breaks this test' ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: pass @parameterized.expand([('linear',), ('dynamic',)] ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Any ) -> str: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = ids_tensor([1, 10] ,config.vocab_size ) A = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights A = LlamaModel(A_ ) original_model.to(A_ ) original_model.eval() A = original_model(A_ ).last_hidden_state A = original_model(A_ ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights A = {'type': scaling_type, 'factor': 10.0} A = LlamaModel(A_ ) scaled_model.to(A_ ) scaled_model.eval() A = scaled_model(A_ ).last_hidden_state A = scaled_model(A_ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(A_ ,A_ ,atol=1e-5 ) ) else: self.assertFalse(torch.allclose(A_ ,A_ ,atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(A_ ,A_ ,atol=1e-5 ) ) @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: A = [1, 306, 4658, 278, 6593, 310, 2834, 338] A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' ,device_map='auto' ) A = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 A = torch.tensor([[-6.65_50, -4.12_27, -4.98_59, -3.24_06, 0.82_62, -3.00_33, 1.29_64, -3.36_99]] ) torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A = torch.tensor([-12.82_81, -7.44_53, -0.46_39, -8.06_25, -7.25_00, -8.00_00, -6.48_83, -7.76_95, -7.84_38, -7.03_12, -6.21_88, -7.13_28, -1.84_96, 1.99_61, -8.62_50, -6.72_27, -12.82_81, -6.94_92, -7.07_42, -7.78_52, -7.58_20, -7.90_62, -6.93_75, -7.98_05, -8.34_38, -8.15_62, -8.04_69, -7.62_50, -7.74_22, -7.33_98,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,A_ ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def _SCREAMING_SNAKE_CASE ( self : Any ) -> int: A = [1, 306, 4658, 278, 6593, 310, 2834, 338] A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' ,device_map='auto' ) A = model(torch.tensor(A_ ) ) # Expected mean on dim = -1 A = torch.tensor([[-2.06_22, -1.27_94, -1.16_38, -0.97_88, -1.46_03, -1.02_38, -1.78_93, -1.44_11]] ) torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A = torch.tensor([-8.14_06, -8.05_47, 2.74_61, -1.23_44, -0.14_48, -1.82_62, -1.00_20, -1.81_54, -1.68_95, -1.85_16, -2.35_74, -0.92_77, 3.75_98, 6.57_42, -1.29_98, -0.11_77, -8.14_06, -2.96_88, -2.91_99, -3.16_99, -3.52_54, -2.35_55, -2.79_88, -3.41_41, -2.82_62, -4.51_95, -3.33_79, -3.31_64, -2.78_32, -3.02_73] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,A_ ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def _SCREAMING_SNAKE_CASE ( self : int ) -> str: A = [1, 306, 4658, 278, 6593, 310, 2834, 338] A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ,device_map='auto' ) A = model(torch.tensor(A_ ) ) # Expected mean on dim = -1 A = torch.tensor([[-0.85_62, -1.85_20, -0.75_51, -0.41_62, -1.51_61, -1.20_38, -2.48_23, -2.32_54]] ) torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A = torch.tensor([-2.22_27, 4.88_28, 0.90_23, -0.45_78, -0.78_71, -0.10_33, -0.62_21, -0.57_86, -0.78_03, -1.06_74, -1.29_20, -0.15_70, 0.80_08, 2.07_23, -0.94_97, 0.27_71, -2.22_27, -0.76_12, -1.43_46, -1.20_61, -1.64_26, -0.30_00, -0.71_39, -1.19_34, -1.86_91, -1.69_73, -1.59_47, -1.27_05, -0.35_23, -0.55_13] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) @unittest.skip( 'Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test' ) @slow def _SCREAMING_SNAKE_CASE ( self : Any ) -> str: A = [1, 306, 4658, 278, 6593, 310, 2834, 338] A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' ,device_map='auto' ) A = model(torch.tensor(A_ ) ) A = torch.tensor( [[-4.23_27, -3.33_60, -4.66_65, -4.76_31, -1.81_80, -3.41_70, -1.42_11, -3.18_10]] ,dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) # fmt: off A = torch.tensor([-9.49_22, -3.95_51, 1.79_98, -5.67_58, -5.10_55, -5.89_84, -4.83_20, -6.80_86, -6.53_91, -5.61_72, -5.58_20, -5.53_52, 1.78_81, 3.62_89, -6.51_17, -3.47_85, -9.50_00, -6.03_52, -6.81_25, -6.01_95, -6.68_36, -5.47_27, -6.28_12, -6.03_91, -7.33_98, -7.42_97, -7.48_44, -6.58_20, -5.87_89, -5.53_12] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,A_ ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip('Model is curently gated' ) @slow def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple: A = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi' A = 'Simply put, the theory of relativity states that ' A = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ) A = tokenizer.encode(A_ ,return_tensors='pt' ) A = LlamaForCausalLM.from_pretrained( 'meta-llama/Llama-2-13b-chat-hf' ,device_map='sequential' ,use_safetensors=A_ ) # greedy generation outputs A = model.generate(A_ ,max_new_tokens=64 ,top_p=A_ ,temperature=1 ,do_sample=A_ ) A = tokenizer.decode(generated_ids[0] ,skip_special_tokens=A_ ) self.assertEqual(A_ ,A_ )
91
1
"""simple docstring""" def _snake_case ( snake_case__ : int = 10 ): if not isinstance(snake_case__ , snake_case__ ) or n < 0: raise ValueError('Invalid input' ) A = 10**n A = 2_8433 * (pow(2 , 783_0457 , snake_case__ )) + 1 return str(number % modulus ) if __name__ == "__main__": from doctest import testmod testmod() print(F"""{solution(10) = }""")
91
"""simple docstring""" import os # Precomputes a list of the 100 first triangular numbers _lowercase = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)] def _snake_case ( ): A = os.path.dirname(os.path.realpath(snake_case__ ) ) A = os.path.join(snake_case__ , 'words.txt' ) A = '' with open(snake_case__ ) as f: A = f.readline() A = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )] A = [ word for word in [sum(ord(snake_case__ ) - 64 for x in word ) for word in words] if word in TRIANGULAR_NUMBERS ] return len(snake_case__ ) if __name__ == "__main__": print(solution())
91
1
"""simple docstring""" import argparse import re import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SamConfig, SamImageProcessor, SamModel, SamProcessor, SamVisionConfig, ) _lowercase = { '''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''', '''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''', '''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''', '''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''', '''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''', '''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''', '''mask_downscaling.0''': '''mask_embed.conv1''', '''mask_downscaling.1''': '''mask_embed.layer_norm1''', '''mask_downscaling.3''': '''mask_embed.conv2''', '''mask_downscaling.4''': '''mask_embed.layer_norm2''', '''mask_downscaling.6''': '''mask_embed.conv3''', '''point_embeddings''': '''point_embed''', '''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''', '''image_encoder''': '''vision_encoder''', '''neck.0''': '''neck.conv1''', '''neck.1''': '''neck.layer_norm1''', '''neck.2''': '''neck.conv2''', '''neck.3''': '''neck.layer_norm2''', '''patch_embed.proj''': '''patch_embed.projection''', '''.norm''': '''.layer_norm''', '''blocks''': '''layers''', } def _snake_case ( snake_case__ : Optional[Any] ): A = {} state_dict.pop('pixel_mean' , snake_case__ ) state_dict.pop('pixel_std' , snake_case__ ) A = r'.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*' for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: A = key.replace(snake_case__ , snake_case__ ) if re.match(snake_case__ , snake_case__ ): A = int(re.match(snake_case__ , snake_case__ ).group(2 ) ) if layer_nb == 0: A = key.replace('layers.0' , 'proj_in' ) elif layer_nb == 1: A = key.replace('layers.1' , 'layers.0' ) elif layer_nb == 2: A = key.replace('layers.2' , 'proj_out' ) A = value A = model_state_dict[ 'prompt_encoder.shared_embedding.positional_embedding' ] return model_state_dict def _snake_case ( snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : int="ybelkada/segment-anything" ): A = hf_hub_download(snake_case__ , F'checkpoints/{model_name}.pth' ) if "sam_vit_b" in model_name: A = SamConfig() elif "sam_vit_l" in model_name: A = SamVisionConfig( hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , ) A = SamConfig( vision_config=snake_case__ , ) elif "sam_vit_h" in model_name: A = SamVisionConfig( hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , ) A = SamConfig( vision_config=snake_case__ , ) A = torch.load(snake_case__ , map_location='cpu' ) A = replace_keys(snake_case__ ) A = SamImageProcessor() A = SamProcessor(image_processor=snake_case__ ) A = SamModel(snake_case__ ) hf_model.load_state_dict(snake_case__ ) A = hf_model.to('cuda' ) A = 'https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png' A = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert('RGB' ) A = [[[400, 650]]] A = [[1]] A = processor(images=np.array(snake_case__ ) , return_tensors='pt' ).to('cuda' ) with torch.no_grad(): A = hf_model(**snake_case__ ) A = output.iou_scores.squeeze() if model_name == "sam_vit_h_4b8939": assert scores[-1].item() == 0.579890251159668 A = processor( images=np.array(snake_case__ ) , input_points=snake_case__ , input_labels=snake_case__ , return_tensors='pt' ).to('cuda' ) with torch.no_grad(): A = hf_model(**snake_case__ ) A = output.iou_scores.squeeze() assert scores[-1].item() == 0.9712603092193604 A = ((75, 275, 1725, 850),) A = processor(images=np.array(snake_case__ ) , input_boxes=snake_case__ , return_tensors='pt' ).to('cuda' ) with torch.no_grad(): A = hf_model(**snake_case__ ) A = output.iou_scores.squeeze() assert scores[-1].item() == 0.8686015605926514 # Test with 2 points and 1 image. A = [[[400, 650], [800, 650]]] A = [[1, 1]] A = processor( images=np.array(snake_case__ ) , input_points=snake_case__ , input_labels=snake_case__ , return_tensors='pt' ).to('cuda' ) with torch.no_grad(): A = hf_model(**snake_case__ ) A = output.iou_scores.squeeze() assert scores[-1].item() == 0.9936047792434692 if __name__ == "__main__": _lowercase = argparse.ArgumentParser() _lowercase = ['''sam_vit_b_01ec64''', '''sam_vit_h_4b8939''', '''sam_vit_l_0b3195'''] parser.add_argument( '''--model_name''', default='''sam_vit_h_4b8939''', choices=choices, type=str, help='''Path to hf config.json of model to convert''', ) parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub after converting''', ) parser.add_argument( '''--model_hub_id''', default='''ybelkada/segment-anything''', choices=choices, type=str, help='''Path to hf config.json of model to convert''', ) _lowercase = parser.parse_args() convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
91
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''', '''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = '''mobilenet_v1''' def __init__( self : Optional[int] ,A_ : Optional[int]=3 ,A_ : Any=224 ,A_ : List[Any]=1.0 ,A_ : Union[str, Any]=8 ,A_ : Union[str, Any]="relu6" ,A_ : Optional[Any]=True ,A_ : List[str]=0.9_99 ,A_ : int=0.02 ,A_ : int=0.0_01 ,**A_ : Union[str, Any] ,) -> Dict: super().__init__(**A_ ) if depth_multiplier <= 0: raise ValueError('depth_multiplier must be greater than zero.' ) A = num_channels A = image_size A = depth_multiplier A = min_depth A = hidden_act A = tf_padding A = classifier_dropout_prob A = initializer_range A = layer_norm_eps class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[int] = version.parse('''1.11''' ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict([('pixel_values', {0: 'batch'})] ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: if self.task == "image-classification": return OrderedDict([('logits', {0: 'batch'})] ) else: return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> float: return 1e-4
91
1
"""simple docstring""" import os import unittest from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: str = TransfoXLTokenizer _lowerCamelCase: Union[str, Any] = False _lowerCamelCase: int = False def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: super().setUp() A = [ '<unk>', '[CLS]', '[SEP]', 'want', 'unwanted', 'wa', 'un', 'running', ',', 'low', 'l', ] A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,**A_ : int ) -> Tuple: A = True return TransfoXLTokenizer.from_pretrained(self.tmpdirname ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Optional[int] ) -> int: A = '<unk> UNwanted , running' A = '<unk> unwanted, running' return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict: A = TransfoXLTokenizer(vocab_file=self.vocab_file ,lower_case=A_ ) A = tokenizer.tokenize('<unk> UNwanted , running' ) self.assertListEqual(A_ ,['<unk>', 'unwanted', ',', 'running'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) ,[0, 4, 8, 7] ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]: A = TransfoXLTokenizer(lower_case=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) ,['hello', '!', 'how', 'are', 'you', '?'] ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> int: A = TransfoXLTokenizer(lower_case=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) ,['HeLLo', '!', 'how', 'Are', 'yoU', '?'] ) def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: A = TransfoXLTokenizer(lower_case=A_ ) A = 'Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?' A = [ 'Hello', '(', 'bracket', ')', 'and', 'side', '@-@', 'scrolled', '[', 'and', ']', 'Henry', '\'s', '$', '5', '@,@', '000', 'with', '3', '@.@', '34', 'm', '.', 'What', '\'s', 'up', '!', '?', ] self.assertListEqual(tokenizer.tokenize(A_ ) ,A_ ) self.assertEqual(tokenizer.convert_tokens_to_string(A_ ) ,A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict: A = self.get_tokenizer() A = len(A_ ) tokenizer.add_tokens(['new1', 'new2'] ) tokenizer.move_added_token('new1' ,1 ) # Check that moved token is not copied (duplicate) self.assertEqual(len(A_ ) ,original_len + 2 ) # Check that token is moved to specified id self.assertEqual(tokenizer.encode('new1' ) ,[1] ) self.assertEqual(tokenizer.decode([1] ) ,'new1' )
91
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowercase = { '''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''], '''tokenization_electra''': ['''ElectraTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''ElectraTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ElectraForCausalLM''', '''ElectraForMaskedLM''', '''ElectraForMultipleChoice''', '''ElectraForPreTraining''', '''ElectraForQuestionAnswering''', '''ElectraForSequenceClassification''', '''ElectraForTokenClassification''', '''ElectraModel''', '''ElectraPreTrainedModel''', '''load_tf_weights_in_electra''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFElectraForMaskedLM''', '''TFElectraForMultipleChoice''', '''TFElectraForPreTraining''', '''TFElectraForQuestionAnswering''', '''TFElectraForSequenceClassification''', '''TFElectraForTokenClassification''', '''TFElectraModel''', '''TFElectraPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''FlaxElectraForCausalLM''', '''FlaxElectraForMaskedLM''', '''FlaxElectraForMultipleChoice''', '''FlaxElectraForPreTraining''', '''FlaxElectraForQuestionAnswering''', '''FlaxElectraForSequenceClassification''', '''FlaxElectraForTokenClassification''', '''FlaxElectraModel''', '''FlaxElectraPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
91
1
"""simple docstring""" import os def _snake_case ( snake_case__ : str = "input.txt" ): with open(os.path.join(os.path.dirname(snake_case__ ) , snake_case__ ) ) as input_file: A = [ [int(snake_case__ ) for element in line.split(',' )] for line in input_file.readlines() ] A = len(snake_case__ ) A = len(matrix[0] ) A = [[-1 for _ in range(snake_case__ )] for _ in range(snake_case__ )] for i in range(snake_case__ ): A = matrix[i][0] for j in range(1 , snake_case__ ): for i in range(snake_case__ ): A = minimal_path_sums[i][j - 1] + matrix[i][j] for i in range(1 , snake_case__ ): A = min( minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] ) for i in range(rows - 2 , -1 , -1 ): A = min( minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] ) return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums ) if __name__ == "__main__": print(F"""{solution() = }""")
91
"""simple docstring""" import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal _lowercase = datasets.utils.logging.get_logger(__name__) _lowercase = ['''names''', '''prefix'''] _lowercase = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols'''] _lowercase = ['''encoding_errors''', '''on_bad_lines'''] _lowercase = ['''date_format'''] @dataclass class lowerCAmelCase_ ( datasets.BuilderConfig ): '''simple docstring''' _lowerCamelCase: str = "," _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[Union[int, List[int], str]] = "infer" _lowerCamelCase: Optional[List[str]] = None _lowerCamelCase: Optional[List[str]] = None _lowerCamelCase: Optional[Union[int, str, List[int], List[str]]] = None _lowerCamelCase: Optional[Union[List[int], List[str]]] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: bool = True _lowerCamelCase: Optional[Literal["c", "python", "pyarrow"]] = None _lowerCamelCase: Dict[Union[int, str], Callable[[Any], Any]] = None _lowerCamelCase: Optional[list] = None _lowerCamelCase: Optional[list] = None _lowerCamelCase: bool = False _lowerCamelCase: Optional[Union[int, List[int]]] = None _lowerCamelCase: Optional[int] = None _lowerCamelCase: Optional[Union[str, List[str]]] = None _lowerCamelCase: bool = True _lowerCamelCase: bool = True _lowerCamelCase: bool = False _lowerCamelCase: bool = True _lowerCamelCase: Optional[str] = None _lowerCamelCase: str = "." _lowerCamelCase: Optional[str] = None _lowerCamelCase: str = '"' _lowerCamelCase: int = 0 _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: bool = True _lowerCamelCase: bool = True _lowerCamelCase: int = 0 _lowerCamelCase: bool = True _lowerCamelCase: bool = False _lowerCamelCase: Optional[str] = None _lowerCamelCase: int = 10000 _lowerCamelCase: Optional[datasets.Features] = None _lowerCamelCase: Optional[str] = "strict" _lowerCamelCase: Literal["error", "warn", "skip"] = "error" _lowerCamelCase: Optional[str] = None def _SCREAMING_SNAKE_CASE ( self : str ) -> Any: if self.delimiter is not None: A = self.delimiter if self.column_names is not None: A = self.column_names @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: A = { 'sep': self.sep, 'header': self.header, 'names': self.names, 'index_col': self.index_col, 'usecols': self.usecols, 'prefix': self.prefix, 'mangle_dupe_cols': self.mangle_dupe_cols, 'engine': self.engine, 'converters': self.converters, 'true_values': self.true_values, 'false_values': self.false_values, 'skipinitialspace': self.skipinitialspace, 'skiprows': self.skiprows, 'nrows': self.nrows, 'na_values': self.na_values, 'keep_default_na': self.keep_default_na, 'na_filter': self.na_filter, 'verbose': self.verbose, 'skip_blank_lines': self.skip_blank_lines, 'thousands': self.thousands, 'decimal': self.decimal, 'lineterminator': self.lineterminator, 'quotechar': self.quotechar, 'quoting': self.quoting, 'escapechar': self.escapechar, 'comment': self.comment, 'encoding': self.encoding, 'dialect': self.dialect, 'error_bad_lines': self.error_bad_lines, 'warn_bad_lines': self.warn_bad_lines, 'skipfooter': self.skipfooter, 'doublequote': self.doublequote, 'memory_map': self.memory_map, 'float_precision': self.float_precision, 'chunksize': self.chunksize, 'encoding_errors': self.encoding_errors, 'on_bad_lines': self.on_bad_lines, 'date_format': self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() ,A_ ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class lowerCAmelCase_ ( datasets.ArrowBasedBuilder ): '''simple docstring''' _lowerCamelCase: Any = CsvConfig def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: return datasets.DatasetInfo(features=self.config.features ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Any ) -> str: if not self.config.data_files: raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' ) A = dl_manager.download_and_extract(self.config.data_files ) if isinstance(A_ ,(str, list, tuple) ): A = data_files if isinstance(A_ ,A_ ): A = [files] A = [dl_manager.iter_files(A_ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={'files': files} )] A = [] for split_name, files in data_files.items(): if isinstance(A_ ,A_ ): A = [files] A = [dl_manager.iter_files(A_ ) for file in files] splits.append(datasets.SplitGenerator(name=A_ ,gen_kwargs={'files': files} ) ) return splits def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : pa.Table ) -> pa.Table: if self.config.features is not None: A = self.config.features.arrow_schema if all(not require_storage_cast(A_ ) for feature in self.config.features.values() ): # cheaper cast A = pa.Table.from_arrays([pa_table[field.name] for field in schema] ,schema=A_ ) else: # more expensive cast; allows str <-> int/float or str to Audio for example A = table_cast(A_ ,A_ ) return pa_table def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[str, Any] ) -> List[Any]: A = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str A = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(A_ ) else object for name, dtype, feature in zip(schema.names ,schema.types ,self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(A_ ) ): A = pd.read_csv(A_ ,iterator=A_ ,dtype=A_ ,**self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(A_ ): A = pa.Table.from_pandas(A_ ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(A_ ) except ValueError as e: logger.error(F'Failed to read file \'{file}\' with error {type(A_ )}: {e}' ) raise
91
1
"""simple docstring""" import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _lowercase = logging.get_logger(__name__) _lowercase = {'''vocab_file''': '''spiece.model'''} _lowercase = { '''vocab_file''': { '''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''', } } _lowercase = { '''AI-Sweden/gpt-sw3-126m''': 20_48, '''AI-Sweden/gpt-sw3-350m''': 20_48, '''AI-Sweden/gpt-sw3-1.6b''': 20_48, '''AI-Sweden/gpt-sw3-6.7b''': 20_48, '''AI-Sweden/gpt-sw3-20b''': 20_48, } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: List[str] = VOCAB_FILES_NAMES _lowerCamelCase: List[Any] = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase: int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase: List[str] = ['''input_ids''', '''attention_mask'''] def __init__( self : Tuple ,A_ : Any ,A_ : int=False ,A_ : List[Any]=False ,A_ : Optional[int]=False ,A_ : Union[str, Any]=None ,A_ : Tuple=None ,A_ : Any=None ,A_ : int=None ,A_ : Optional[Dict[str, Any]] = None ,**A_ : Tuple ,) -> None: A = {} if sp_model_kwargs is None else sp_model_kwargs A = kwargs.get('name_or_path' ) if name_or_path is None: logger.warning( 'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,' ' you are testing the model, this can safely be ignored' ) A = 'None' # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing A = '<|endoftext|>' if eos_token is None else eos_token A = '<unk>' if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: A = unk_token if pad_token is None else pad_token A = eos_token if bos_token is None else bos_token else: A = '<pad>' if pad_token is None else pad_token A = '<s>' if bos_token is None else bos_token super().__init__( do_lower_case=A_ ,remove_space=A_ ,keep_accents=A_ ,bos_token=A_ ,eos_token=A_ ,unk_token=A_ ,pad_token=A_ ,sp_model_kwargs=self.sp_model_kwargs ,**A_ ,) A = do_lower_case A = remove_space A = keep_accents A = vocab_file A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(A_ ) # Used for whitespace normalization in input texts # fmt : off A = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', '„'} # fmt : on # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing A = re.compile( F'[{"".join(map(A_ ,list(range(0 ,9 ) ) + list(range(11 ,32 ) ) + list(range(127 ,160 ) ) + [160, 173, 8203] ) )}]' ) def __getstate__( self : Optional[Any] ) -> Tuple: A = self.__dict__.copy() A = None return state def __setstate__( self : Dict ,A_ : List[str] ) -> str: A = d # for backward compatibility if not hasattr(self ,'sp_model_kwargs' ): A = {} A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: return len(self.sp_model ) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : str ) -> str: A = self.non_printing_characters_re.sub('' ,A_ ) # Normalize whitespaces A = ''.join([char if char not in self.whitespaces else ' ' for char in text] ) # NFC Unicode normalization A = unicodedata.normalize('NFC' ,A_ ) return text def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : str ,**A_ : Any ) -> List[str]: A = self.preprocess_text(A_ ) return self.sp_model.encode(A_ ,out_type=A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ) -> int: return self.sp_model.PieceToId(A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : int ) -> str: return self.sp_model.IdToPiece(A_ ) @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : str ) -> str: return out_string def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[str] ) -> str: A = [] A = '' A = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(A_ ) + token A = True A = [] else: current_sub_tokens.append(A_ ) A = False out_string += self.sp_model.decode(A_ ) return out_string def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict[str, int]: A = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(A_ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return A = os.path.join( A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,A_ ) elif not os.path.isfile(self.vocab_file ): with open(A_ ,'wb' ) as fi: A = self.sp_model.serialized_model_proto() fi.write(A_ ) return (out_vocab_file,) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, List[str]] ,A_ : Union[str, bool] = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]: if isinstance(A_ ,A_ ): A = self.preprocess_text(A_ ) A = self.sp_model.encode(A_ ) else: A = [self.preprocess_text(A_ ) for t in text] A = self.sp_model.encode(A_ ) if return_tensors is True or return_tensors == "pt": A = torch.tensor(A_ ) return token_ids def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Union[int, List[int]] ) -> str: return self.sp_model.decode(A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : "Conversation" ) -> List[int]: A = [F'User: {text}' if is_user else F'Bot: {text}' for is_user, text in conversation.iter_texts()] A = ( F'{self.eos_token}{self.bos_token}' + F'{self.bos_token}'.join(A_ ) + F'{self.bos_token}Bot:' ) return self.encode(text=A_ )
91
"""simple docstring""" from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : Any ,A_ : Callable ,A_ : Optional[Features] = None ,A_ : str = None ,A_ : bool = False ,A_ : bool = False ,A_ : Optional[dict] = None ,A_ : Optional[int] = None ,**A_ : int ,) -> str: super().__init__( features=A_ ,cache_dir=A_ ,keep_in_memory=A_ ,streaming=A_ ,num_proc=A_ ,**A_ ,) A = Generator( cache_dir=A_ ,features=A_ ,generator=A_ ,gen_kwargs=A_ ,**A_ ,) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: # Build iterable dataset if self.streaming: A = self.builder.as_streaming_dataset(split='train' ) # Build regular (map-style) dataset else: A = None A = None A = None A = None self.builder.download_and_prepare( download_config=A_ ,download_mode=A_ ,verification_mode=A_ ,base_path=A_ ,num_proc=self.num_proc ,) A = self.builder.as_dataset( split='train' ,verification_mode=A_ ,in_memory=self.keep_in_memory ) return dataset
91
1
"""simple docstring""" import pickle import numpy as np from matplotlib import pyplot as plt class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[int] ,A_ : str ,A_ : str ,A_ : int ,A_ : Any ,A_ : Tuple ,A_ : Tuple=0.2 ,A_ : Tuple=0.2 ) -> Optional[Any]: A = bp_numa A = bp_numa A = bp_numa A = conva_get[:2] A = conva_get[2] A = size_pa A = rate_w A = rate_t A = [ np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] A = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 ) A = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 ) A = -2 * np.random.rand(self.conva[1] ) + 1 A = -2 * np.random.rand(self.num_bpa ) + 1 A = -2 * np.random.rand(self.num_bpa ) + 1 def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : int ) -> Optional[Any]: # save model dict with pickle A = { 'num_bp1': self.num_bpa, 'num_bp2': self.num_bpa, 'num_bp3': self.num_bpa, 'conv1': self.conva, 'step_conv1': self.step_conva, 'size_pooling1': self.size_poolinga, 'rate_weight': self.rate_weight, 'rate_thre': self.rate_thre, 'w_conv1': self.w_conva, 'wkj': self.wkj, 'vji': self.vji, 'thre_conv1': self.thre_conva, 'thre_bp2': self.thre_bpa, 'thre_bp3': self.thre_bpa, } with open(A_ ,'wb' ) as f: pickle.dump(A_ ,A_ ) print(F'Model saved: {save_path}' ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Optional[int] ,A_ : int ) -> Dict: # read saved model with open(A_ ,'rb' ) as f: A = pickle.load(A_ ) # noqa: S301 A = model_dic.get('conv1' ) conv_get.append(model_dic.get('step_conv1' ) ) A = model_dic.get('size_pooling1' ) A = model_dic.get('num_bp1' ) A = model_dic.get('num_bp2' ) A = model_dic.get('num_bp3' ) A = model_dic.get('rate_weight' ) A = model_dic.get('rate_thre' ) # create model instance A = CNN(A_ ,A_ ,A_ ,A_ ,A_ ,A_ ,A_ ) # modify model parameter A = model_dic.get('w_conv1' ) A = model_dic.get('wkj' ) A = model_dic.get('vji' ) A = model_dic.get('thre_conv1' ) A = model_dic.get('thre_bp2' ) A = model_dic.get('thre_bp3' ) return conv_ins def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : List[str] ) -> List[Any]: return 1 / (1 + np.exp(-1 * x )) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Dict ) -> List[str]: return round(A_ ,3 ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Union[str, Any] ,A_ : str ,A_ : Optional[Any] ,A_ : List[Any] ,A_ : List[Any] ) -> Optional[Any]: # convolution process A = convs[0] A = convs[1] A = np.shape(A_ )[0] # get the data slice of original image data, data_focus A = [] for i_focus in range(0 ,size_data - size_conv + 1 ,A_ ): for j_focus in range(0 ,size_data - size_conv + 1 ,A_ ): A = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(A_ ) # calculate the feature map of every single kernel, and saved as list of matrix A = [] A = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(A_ ): A = [] for i_focus in range(len(A_ ) ): A = ( np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(A_ ) ) A = np.asmatrix(A_ ).reshape( A_ ,A_ ) data_featuremap.append(A_ ) # expanding the data slice to One dimenssion A = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(A_ ) ) A = np.asarray(A_ ) return focus_list, data_featuremap def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Tuple ,A_ : List[str] ,A_ : Tuple="average_pool" ) -> List[Any]: # pooling process A = len(featuremaps[0] ) A = int(size_map / size_pooling ) A = [] for i_map in range(len(A_ ) ): A = featuremaps[i_map] A = [] for i_focus in range(0 ,A_ ,A_ ): for j_focus in range(0 ,A_ ,A_ ): A = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(A_ ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(A_ ) ) A = np.asmatrix(A_ ).reshape(A_ ,A_ ) featuremap_pooled.append(A_ ) return featuremap_pooled def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple ) -> List[str]: # expanding three dimension data to one dimension list A = [] for i in range(len(A_ ) ): A = np.shape(data[i] ) A = data[i].reshape(1 ,shapes[0] * shapes[1] ) A = data_listed.getA().tolist()[0] data_expanded.extend(A_ ) A = np.asarray(A_ ) return data_expanded def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : str ) -> Optional[Any]: # expanding matrix to one dimension list A = np.asarray(A_ ) A = np.shape(A_ ) A = data_mat.reshape(1 ,shapes[0] * shapes[1] ) return data_expanded def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Dict ,A_ : List[Any] ,A_ : int ,A_ : Optional[Any] ,A_ : List[Any] ) -> Tuple: A = [] A = 0 for i_map in range(A_ ): A = np.ones((size_map, size_map) ) for i in range(0 ,A_ ,A_ ): for j in range(0 ,A_ ,A_ ): A = pd_pool[ i_pool ] A = i_pool + 1 A = np.multiply( A_ ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) ) pd_all.append(A_ ) return pd_all def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[Any] ,A_ : Optional[Any] ,A_ : str ,A_ : Union[str, Any] ,A_ : str ,A_ : Any=bool ) -> Optional[Any]: # model traning print('----------------------Start Training-------------------------' ) print((' - - Shape: Train_Data ', np.shape(A_ )) ) print((' - - Shape: Teach_Data ', np.shape(A_ )) ) A = 0 A = [] A = 1_0000 while rp < n_repeat and mse >= error_accuracy: A = 0 print(F'-------------Learning Time {rp}--------------' ) for p in range(len(A_ ) ): # print('------------Learning Image: %d--------------'%p) A = np.asmatrix(datas_train[p] ) A = np.asarray(datas_teach[p] ) A , A = self.convolute( A_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) A = self.pooling(A_ ,self.size_poolinga ) A = np.shape(A_ ) A = self._expand(A_ ) A = data_bp_input A = np.dot(A_ ,self.vji.T ) - self.thre_bpa A = self.sig(A_ ) A = np.dot(A_ ,self.wkj.T ) - self.thre_bpa A = self.sig(A_ ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- A = np.multiply( (data_teach - bp_outa) ,np.multiply(A_ ,(1 - bp_outa) ) ) A = np.multiply( np.dot(A_ ,self.wkj ) ,np.multiply(A_ ,(1 - bp_outa) ) ) A = np.dot(A_ ,self.vji ) A = pd_i_all / (self.size_poolinga * self.size_poolinga) A = pd_conva_pooled.T.getA().tolist() A = self._calculate_gradient_from_pool( A_ ,A_ ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): A = self._expand_mat(pd_conva_all[k_conv] ) A = self.rate_weight * np.dot(A_ ,A_ ) A = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) A = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer A = self.wkj + pd_k_all.T * bp_outa * self.rate_weight A = self.vji + pd_j_all.T * bp_outa * self.rate_weight A = self.thre_bpa - pd_k_all * self.rate_thre A = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image A = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) A = rp + 1 A = error_count / patterns all_mse.append(A_ ) def draw_error(): A = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(A_ ,'+-' ) plt.plot(A_ ,'r--' ) plt.xlabel('Learning Times' ) plt.ylabel('All_mse' ) plt.grid(A_ ,alpha=0.5 ) plt.show() print('------------------Training Complished---------------------' ) print((' - - Training epoch: ', rp, F' - - Mse: {mse:.6f}') ) if draw_e: draw_error() return mse def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ) -> Union[str, Any]: # model predict A = [] print('-------------------Start Testing-------------------------' ) print((' - - Shape: Test_Data ', np.shape(A_ )) ) for p in range(len(A_ ) ): A = np.asmatrix(datas_test[p] ) A , A = self.convolute( A_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) A = self.pooling(A_ ,self.size_poolinga ) A = self._expand(A_ ) A = data_bp_input A = bp_outa * self.vji.T - self.thre_bpa A = self.sig(A_ ) A = bp_outa * self.wkj.T - self.thre_bpa A = self.sig(A_ ) produce_out.extend(bp_outa.getA().tolist() ) A = [list(map(self.do_round ,A_ ) ) for each in produce_out] return np.asarray(A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any] ) -> Any: # return the data of image after convoluting process so we can check it out A = np.asmatrix(A_ ) A , A = self.convolute( A_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) A = self.pooling(A_ ,self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
91
"""simple docstring""" from maths.prime_check import is_prime def _snake_case ( snake_case__ : int ): if not isinstance(snake_case__ , snake_case__ ): A = F'Input value of [number={number}] must be an integer' raise TypeError(snake_case__ ) if is_prime(snake_case__ ) and is_prime(number + 2 ): return number + 2 else: return -1 if __name__ == "__main__": import doctest doctest.testmod()
91
1
"""simple docstring""" from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Value from .base import TaskTemplate @dataclass(frozen=_lowercase ) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: str = field(default='''summarization''' , metadata={'''include_in_asdict_even_if_is_default''': True} ) _lowerCamelCase: ClassVar[Features] = Features({'''text''': Value('''string''' )} ) _lowerCamelCase: ClassVar[Features] = Features({'''summary''': Value('''string''' )} ) _lowerCamelCase: str = "text" _lowerCamelCase: str = "summary" @property def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict[str, str]: return {self.text_column: "text", self.summary_column: "summary"}
91
"""simple docstring""" import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Dict = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : List[str]=0 ) -> str: A = floats_tensor((1, 3, 128, 128) ,rng=random.Random(A_ ) ) A = np.random.RandomState(A_ ) A = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'strength': 0.75, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) A = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=A_ ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) # warmup pass to apply optimizations A = pipe(**self.get_dummy_inputs() ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: A = ort.SessionOptions() A = False return options def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) A = init_image.resize((768, 512) ) # using the PNDM scheduler by default A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' ,revision='onnx' ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=A_ ) A = 'A fantasy landscape, trending on artstation' A = np.random.RandomState(0 ) A = pipe( prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=A_ ,output_type='np' ,) A = output.images A = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) A = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) A = init_image.resize((768, 512) ) A = LMSDiscreteScheduler.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,subfolder='scheduler' ,revision='onnx' ) A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,scheduler=A_ ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=A_ ) A = 'A fantasy landscape, trending on artstation' A = np.random.RandomState(0 ) A = pipe( prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=A_ ,output_type='np' ,) A = output.images A = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) A = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
91
1
"""simple docstring""" from dataclasses import dataclass, field from typing import Optional from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser @dataclass class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: str = field( metadata={'''help''': '''The output directory where the model will be written.'''} , ) _lowerCamelCase: str = field( metadata={ '''help''': ( '''The encoder model checkpoint for weights initialization.''' '''Don\'t set if you want to train an encoder model from scratch.''' ) } , ) _lowerCamelCase: str = field( metadata={ '''help''': ( '''The decoder model checkpoint for weights initialization.''' '''Don\'t set if you want to train a decoder model from scratch.''' ) } , ) _lowerCamelCase: Optional[str] = field( default=_lowercase , metadata={'''help''': '''Pretrained encoder config name or path if not the same as encoder_model_name'''} ) _lowerCamelCase: Optional[str] = field( default=_lowercase , metadata={'''help''': '''Pretrained decoder config name or path if not the same as decoder_model_name'''} ) def _snake_case ( ): A = HfArgumentParser((ModelArguments,) ) ((A) , ) = parser.parse_args_into_dataclasses() # Load pretrained model and tokenizer # Use explicit specified encoder config if model_args.encoder_config_name: A = AutoConfig.from_pretrained(model_args.encoder_config_name ) # Use pretrained encoder model's config else: A = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path ) # Use explicit specified decoder config if model_args.decoder_config_name: A = AutoConfig.from_pretrained(model_args.decoder_config_name ) # Use pretrained decoder model's config else: A = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path ) # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed A = True A = True A = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=snake_case__ , decoder_config=snake_case__ , ) # GPT2 only has bos/eos tokens but not decoder_start/pad tokens A = decoder_config.decoder_start_token_id A = decoder_config.pad_token_id if decoder_start_token_id is None: A = decoder_config.bos_token_id if pad_token_id is None: A = decoder_config.eos_token_id # This is necessary to make Flax's generate() work A = decoder_config.eos_token_id A = decoder_start_token_id A = pad_token_id A = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path ) A = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path ) A = tokenizer.convert_ids_to_tokens(model.config.pad_token_id ) model.save_pretrained(model_args.output_dir ) image_processor.save_pretrained(model_args.output_dir ) tokenizer.save_pretrained(model_args.output_dir ) if __name__ == "__main__": main()
91
"""simple docstring""" from __future__ import annotations def _snake_case ( snake_case__ : tuple[int, int] , snake_case__ : int ): A , A = position A = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] A = [] for position in positions: A , A = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(snake_case__ ) return permissible_positions def _snake_case ( snake_case__ : list[list[int]] ): return not any(elem == 0 for row in board for elem in row ) def _snake_case ( snake_case__ : list[list[int]] , snake_case__ : tuple[int, int] , snake_case__ : int ): if is_complete(snake_case__ ): return True for position in get_valid_pos(snake_case__ , len(snake_case__ ) ): A , A = position if board[y][x] == 0: A = curr + 1 if open_knight_tour_helper(snake_case__ , snake_case__ , curr + 1 ): return True A = 0 return False def _snake_case ( snake_case__ : int ): A = [[0 for i in range(snake_case__ )] for j in range(snake_case__ )] for i in range(snake_case__ ): for j in range(snake_case__ ): A = 1 if open_knight_tour_helper(snake_case__ , (i, j) , 1 ): return board A = 0 A = F'Open Kight Tour cannot be performed on a board of size {n}' raise ValueError(snake_case__ ) if __name__ == "__main__": import doctest doctest.testmod()
91
1
"""simple docstring""" import os import unittest from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, BertTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: List[str] = BertTokenizer _lowerCamelCase: Optional[int] = BertTokenizerFast _lowerCamelCase: Optional[Any] = True _lowerCamelCase: int = True _lowerCamelCase: str = filter_non_english def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]: super().setUp() A = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Optional[int] ) -> str: A = 'UNwant\u00E9d,running' A = 'unwanted, running' return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple: A = self.tokenizer_class(self.vocab_file ) A = tokenizer.tokenize('UNwant\u00E9d,running' ) self.assertListEqual(A_ ,['un', '##want', '##ed', ',', 'runn', '##ing'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) ,[9, 6, 7, 12, 10, 11] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]: if not self.test_rust_tokenizer: return A = self.get_tokenizer() A = self.get_rust_tokenizer() A = 'UNwant\u00E9d,running' A = tokenizer.tokenize(A_ ) A = rust_tokenizer.tokenize(A_ ) self.assertListEqual(A_ ,A_ ) A = tokenizer.encode(A_ ,add_special_tokens=A_ ) A = rust_tokenizer.encode(A_ ,add_special_tokens=A_ ) self.assertListEqual(A_ ,A_ ) A = self.get_rust_tokenizer() A = tokenizer.encode(A_ ) A = rust_tokenizer.encode(A_ ) self.assertListEqual(A_ ,A_ ) # With lower casing A = self.get_tokenizer(do_lower_case=A_ ) A = self.get_rust_tokenizer(do_lower_case=A_ ) A = 'UNwant\u00E9d,running' A = tokenizer.tokenize(A_ ) A = rust_tokenizer.tokenize(A_ ) self.assertListEqual(A_ ,A_ ) A = tokenizer.encode(A_ ,add_special_tokens=A_ ) A = rust_tokenizer.encode(A_ ,add_special_tokens=A_ ) self.assertListEqual(A_ ,A_ ) A = self.get_rust_tokenizer() A = tokenizer.encode(A_ ) A = rust_tokenizer.encode(A_ ) self.assertListEqual(A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: A = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) ,['ah', '\u535A', '\u63A8', 'zz'] ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: A = BasicTokenizer(do_lower_case=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) ,['hello', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['hello'] ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: A = BasicTokenizer(do_lower_case=A_ ,strip_accents=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['hällo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['h\u00E9llo'] ) def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: A = BasicTokenizer(do_lower_case=A_ ,strip_accents=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['hello'] ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict: A = BasicTokenizer(do_lower_case=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['hello'] ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: A = BasicTokenizer(do_lower_case=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) ,['HeLLo', '!', 'how', 'Are', 'yoU', '?'] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: A = BasicTokenizer(do_lower_case=A_ ,strip_accents=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['HäLLo', '!', 'how', 'Are', 'yoU', '?'] ) def _SCREAMING_SNAKE_CASE ( self : str ) -> int: A = BasicTokenizer(do_lower_case=A_ ,strip_accents=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['HaLLo', '!', 'how', 'Are', 'yoU', '?'] ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any: A = BasicTokenizer(do_lower_case=A_ ,never_split=['[UNK]'] ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) ,['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: A = BasicTokenizer() A = 'a\n\'ll !!to?\'d of, can\'t.' A = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.'] self.assertListEqual(tokenizer.tokenize(A_ ) ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]: A = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing'] A = {} for i, token in enumerate(A_ ): A = i A = WordpieceTokenizer(vocab=A_ ,unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) ,[] ) self.assertListEqual(tokenizer.tokenize('unwanted running' ) ,['un', '##want', '##ed', 'runn', '##ing'] ) self.assertListEqual(tokenizer.tokenize('unwantedX running' ) ,['[UNK]', 'runn', '##ing'] ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: self.assertTrue(_is_whitespace(' ' ) ) self.assertTrue(_is_whitespace('\t' ) ) self.assertTrue(_is_whitespace('\r' ) ) self.assertTrue(_is_whitespace('\n' ) ) self.assertTrue(_is_whitespace('\u00A0' ) ) self.assertFalse(_is_whitespace('A' ) ) self.assertFalse(_is_whitespace('-' ) ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: self.assertTrue(_is_control('\u0005' ) ) self.assertFalse(_is_control('A' ) ) self.assertFalse(_is_control(' ' ) ) self.assertFalse(_is_control('\t' ) ) self.assertFalse(_is_control('\r' ) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict: self.assertTrue(_is_punctuation('-' ) ) self.assertTrue(_is_punctuation('$' ) ) self.assertTrue(_is_punctuation('`' ) ) self.assertTrue(_is_punctuation('.' ) ) self.assertFalse(_is_punctuation('A' ) ) self.assertFalse(_is_punctuation(' ' ) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: A = self.get_tokenizer() A = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] ,[['[UNK]'], [], ['[UNK]']] ) self.assertListEqual( [rust_tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] ,[['[UNK]'], [], ['[UNK]']] ) @slow def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict: A = self.tokenizer_class.from_pretrained('bert-base-uncased' ) A = tokenizer.encode('sequence builders' ,add_special_tokens=A_ ) A = tokenizer.encode('multi-sequence build' ,add_special_tokens=A_ ) A = tokenizer.build_inputs_with_special_tokens(A_ ) A = tokenizer.build_inputs_with_special_tokens(A_ ,A_ ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): A = self.rust_tokenizer_class.from_pretrained(A_ ,**A_ ) A = F'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.' A = tokenizer_r.encode_plus( A_ ,return_attention_mask=A_ ,return_token_type_ids=A_ ,return_offsets_mapping=A_ ,add_special_tokens=A_ ,) A = tokenizer_r.do_lower_case if hasattr(A_ ,'do_lower_case' ) else False A = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'A'), ((1, 2), ','), ((3, 5), 'na'), ((5, 6), '##ï'), ((6, 8), '##ve'), ((9, 15), tokenizer_r.mask_token), ((16, 21), 'Allen'), ((21, 23), '##NL'), ((23, 24), '##P'), ((25, 33), 'sentence'), ((33, 34), '.'), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'a'), ((1, 2), ','), ((3, 8), 'naive'), ((9, 15), tokenizer_r.mask_token), ((16, 21), 'allen'), ((21, 23), '##nl'), ((23, 24), '##p'), ((25, 33), 'sentence'), ((33, 34), '.'), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] ,tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) ) self.assertEqual([e[0] for e in expected_results] ,tokens['offset_mapping'] ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]: A = ['的', '人', '有'] A = ''.join(A_ ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): A = True A = self.tokenizer_class.from_pretrained(A_ ,**A_ ) A = self.rust_tokenizer_class.from_pretrained(A_ ,**A_ ) A = tokenizer_p.encode(A_ ,add_special_tokens=A_ ) A = tokenizer_r.encode(A_ ,add_special_tokens=A_ ) A = tokenizer_r.convert_ids_to_tokens(A_ ) A = tokenizer_p.convert_ids_to_tokens(A_ ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(A_ ,A_ ) self.assertListEqual(A_ ,A_ ) A = False A = self.rust_tokenizer_class.from_pretrained(A_ ,**A_ ) A = self.tokenizer_class.from_pretrained(A_ ,**A_ ) A = tokenizer_r.encode(A_ ,add_special_tokens=A_ ) A = tokenizer_p.encode(A_ ,add_special_tokens=A_ ) A = tokenizer_r.convert_ids_to_tokens(A_ ) A = tokenizer_p.convert_ids_to_tokens(A_ ) # it is expected that only the first Chinese character is not preceded by "##". A = [ F'##{token}' if idx != 0 else token for idx, token in enumerate(A_ ) ] self.assertListEqual(A_ ,A_ ) self.assertListEqual(A_ ,A_ )
91
"""simple docstring""" import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Optional[int] = BlenderbotSmallTokenizer _lowerCamelCase: List[Any] = False def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: super().setUp() A = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__'] A = dict(zip(A_ ,range(len(A_ ) ) ) ) A = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', ''] A = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'} A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp: fp.write(json.dumps(A_ ) + '\n' ) with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp: fp.write('\n'.join(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,**A_ : Union[str, Any] ) -> Optional[int]: kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Tuple ) -> List[Any]: A = 'adapt act apte' A = 'adapt act apte' return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]: A = BlenderbotSmallTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map ) A = 'adapt act apte' A = ['adapt', 'act', 'ap@@', 'te'] A = tokenizer.tokenize(A_ ) self.assertListEqual(A_ ,A_ ) A = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] A = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: A = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' ) assert tok('sam' ).input_ids == [1384] A = 'I am a small frog.' A = tok([src_text] ,padding=A_ ,truncation=A_ )['input_ids'] A = tok.batch_decode(A_ ,skip_special_tokens=A_ ,clean_up_tokenization_spaces=A_ )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: A = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' ) A = 'I am a small frog .' A = '.' A = tok(A_ )['input_ids'] A = tok(A_ )['input_ids'] assert encoded[-1] == encoded_dot[0]
91
1
"""simple docstring""" from itertools import product def _snake_case ( snake_case__ : int , snake_case__ : int ): A = sides_number A = max_face_number * dice_number A = [0] * (max_total + 1) A = 1 A = range(snake_case__ , max_face_number + 1 ) for dice_numbers in product(snake_case__ , repeat=snake_case__ ): A = sum(snake_case__ ) totals_frequencies[total] += 1 return totals_frequencies def _snake_case ( ): A = total_frequency_distribution( sides_number=4 , dice_number=9 ) A = total_frequency_distribution( sides_number=6 , dice_number=6 ) A = 0 A = 9 A = 4 * 9 A = 6 for peter_total in range(snake_case__ , max_peter_total + 1 ): peter_wins_count += peter_totals_frequencies[peter_total] * sum( colin_totals_frequencies[min_colin_total:peter_total] ) A = (4**9) * (6**6) A = peter_wins_count / total_games_number A = round(snake_case__ , ndigits=7 ) return rounded_peter_win_probability if __name__ == "__main__": print(F"""{solution() = }""")
91
"""simple docstring""" from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: List[str] = ['''image_processor''', '''tokenizer'''] _lowerCamelCase: Optional[int] = '''Pix2StructImageProcessor''' _lowerCamelCase: Dict = ('''T5Tokenizer''', '''T5TokenizerFast''') def __init__( self : Optional[int] ,A_ : List[str] ,A_ : Optional[int] ) -> int: A = False super().__init__(A_ ,A_ ) def __call__( self : Any ,A_ : List[str]=None ,A_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,A_ : bool = True ,A_ : Union[bool, str, PaddingStrategy] = False ,A_ : Union[bool, str, TruncationStrategy] = None ,A_ : Optional[int] = None ,A_ : Optional[int] = 2048 ,A_ : int = 0 ,A_ : Optional[int] = None ,A_ : Optional[bool] = None ,A_ : bool = False ,A_ : bool = False ,A_ : bool = False ,A_ : bool = False ,A_ : bool = False ,A_ : bool = True ,A_ : Optional[Union[str, TensorType]] = None ,**A_ : Tuple ,) -> BatchEncoding: if images is None and text is None: raise ValueError('You have to specify either images or text.' ) # Get only text if images is None and not self.image_processor.is_vqa: A = self.tokenizer A = self.tokenizer( text=A_ ,add_special_tokens=A_ ,padding=A_ ,truncation=A_ ,max_length=A_ ,stride=A_ ,pad_to_multiple_of=A_ ,return_attention_mask=A_ ,return_overflowing_tokens=A_ ,return_special_tokens_mask=A_ ,return_offsets_mapping=A_ ,return_token_type_ids=A_ ,return_length=A_ ,verbose=A_ ,return_tensors=A_ ,**A_ ,) return text_encoding if not self.image_processor.is_vqa: # add pixel_values A = self.image_processor( A_ ,return_tensors=A_ ,max_patches=A_ ,**A_ ) else: # add pixel_values and bbox A = self.image_processor( A_ ,return_tensors=A_ ,max_patches=A_ ,header_text=A_ ,**A_ ) if text is not None and not self.image_processor.is_vqa: A = self.tokenizer( text=A_ ,add_special_tokens=A_ ,padding=A_ ,truncation=A_ ,max_length=A_ ,stride=A_ ,pad_to_multiple_of=A_ ,return_attention_mask=A_ ,return_overflowing_tokens=A_ ,return_special_tokens_mask=A_ ,return_offsets_mapping=A_ ,return_token_type_ids=A_ ,return_length=A_ ,verbose=A_ ,return_tensors=A_ ,**A_ ,) if "attention_mask" in text_encoding: A = text_encoding.pop('attention_mask' ) if "input_ids" in text_encoding: A = text_encoding.pop('input_ids' ) else: A = None if text_encoding is not None: encoding_image_processor.update(A_ ) return encoding_image_processor def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,*A_ : Optional[Any] ,**A_ : Dict ) -> Union[str, Any]: return self.tokenizer.batch_decode(*A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,*A_ : Tuple ,**A_ : List[str] ) -> Any: return self.tokenizer.decode(*A_ ,**A_ ) @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: A = self.tokenizer.model_input_names A = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
91
1
"""simple docstring""" def _snake_case ( ): return 1 def _snake_case ( snake_case__ : int ): return 0 if x < 0 else two_pence(x - 2 ) + one_pence() def _snake_case ( snake_case__ : int ): return 0 if x < 0 else five_pence(x - 5 ) + two_pence(snake_case__ ) def _snake_case ( snake_case__ : int ): return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(snake_case__ ) def _snake_case ( snake_case__ : int ): return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(snake_case__ ) def _snake_case ( snake_case__ : int ): return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(snake_case__ ) def _snake_case ( snake_case__ : int ): return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(snake_case__ ) def _snake_case ( snake_case__ : int ): return 0 if x < 0 else two_pound(x - 200 ) + one_pound(snake_case__ ) def _snake_case ( snake_case__ : int = 200 ): return two_pound(snake_case__ ) if __name__ == "__main__": print(solution(int(input().strip())))
91
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = '''▁''' _lowercase = {'''vocab_file''': '''sentencepiece.bpe.model'''} _lowercase = { '''vocab_file''': { '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''', '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''', '''xlm-roberta-large-finetuned-conll02-dutch''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll02-spanish''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll03-english''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll03-german''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model''' ), } } _lowercase = { '''xlm-roberta-base''': 5_12, '''xlm-roberta-large''': 5_12, '''xlm-roberta-large-finetuned-conll02-dutch''': 5_12, '''xlm-roberta-large-finetuned-conll02-spanish''': 5_12, '''xlm-roberta-large-finetuned-conll03-english''': 5_12, '''xlm-roberta-large-finetuned-conll03-german''': 5_12, } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = VOCAB_FILES_NAMES _lowerCamelCase: List[str] = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase: Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase: Any = ['''input_ids''', '''attention_mask'''] def __init__( self : Union[str, Any] ,A_ : str ,A_ : str="<s>" ,A_ : Any="</s>" ,A_ : Tuple="</s>" ,A_ : Any="<s>" ,A_ : Optional[Any]="<unk>" ,A_ : int="<pad>" ,A_ : str="<mask>" ,A_ : Optional[Dict[str, Any]] = None ,**A_ : Optional[int] ,) -> None: # Mask token behave like a normal word, i.e. include the space before it A = AddedToken(A_ ,lstrip=A_ ,rstrip=A_ ) if isinstance(A_ ,A_ ) else mask_token A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A_ ,eos_token=A_ ,unk_token=A_ ,sep_token=A_ ,cls_token=A_ ,pad_token=A_ ,mask_token=A_ ,sp_model_kwargs=self.sp_model_kwargs ,**A_ ,) A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(A_ ) ) A = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token A = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab A = 1 A = len(self.sp_model ) + self.fairseq_offset A = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Union[str, Any] ) -> Any: A = self.__dict__.copy() A = None A = self.sp_model.serialized_model_proto() return state def __setstate__( self : str ,A_ : str ) -> Optional[Any]: A = d # for backward compatibility if not hasattr(self ,'sp_model_kwargs' ): A = {} A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A = [self.cls_token_id] A = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[int] ,A_ : Optional[List[int]] = None ,A_ : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A_ ,token_ids_a=A_ ,already_has_special_tokens=A_ ) if token_ids_a is None: return [1] + ([0] * len(A_ )) + [1] return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1] def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]: A = [self.sep_token_id] A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: A = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> List[str]: return self.sp_model.encode(A_ ,out_type=A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[Any] ) -> Tuple: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] A = self.sp_model.PieceToId(A_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> int: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ) -> List[Any]: A = ''.join(A_ ).replace(A_ ,' ' ).strip() return out_string def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(A_ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return A = os.path.join( A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,A_ ) elif not os.path.isfile(self.vocab_file ): with open(A_ ,'wb' ) as fi: A = self.sp_model.serialized_model_proto() fi.write(A_ ) return (out_vocab_file,)
91
1
"""simple docstring""" def _snake_case ( snake_case__ : list ): if not grid or not grid[0]: raise TypeError('The grid does not contain the appropriate information' ) for cell_n in range(1 , len(grid[0] ) ): grid[0][cell_n] += grid[0][cell_n - 1] A = grid[0] for row_n in range(1 , len(snake_case__ ) ): A = grid[row_n] A = fill_row(snake_case__ , snake_case__ ) A = grid[row_n] return grid[-1][-1] def _snake_case ( snake_case__ : list , snake_case__ : list ): current_row[0] += row_above[0] for cell_n in range(1 , len(snake_case__ ) ): current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] ) return current_row if __name__ == "__main__": import doctest doctest.testmod()
91
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase = { '''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''], '''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''], '''processing_mctct''': ['''MCTCTProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MCTCTForCTC''', '''MCTCTModel''', '''MCTCTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
91
1
"""simple docstring""" import warnings from ...utils import logging from .image_processing_owlvit import OwlViTImageProcessor _lowercase = logging.get_logger(__name__) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : Union[str, Any] ,*A_ : int ,**A_ : Union[str, Any] ) -> None: warnings.warn( 'The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use OwlViTImageProcessor instead.' ,A_ ,) super().__init__(*A_ ,**A_ )
91
"""simple docstring""" from torch import nn def _snake_case ( snake_case__ : Union[str, Any] ): if act_fn in ["swish", "silu"]: return nn.SiLU() elif act_fn == "mish": return nn.Mish() elif act_fn == "gelu": return nn.GELU() else: raise ValueError(F'Unsupported activation function: {act_fn}' )
91
1
"""simple docstring""" import argparse from argparse import Namespace import torch from torch import nn from transformers import XGLMConfig, XGLMForCausalLM def _snake_case ( snake_case__ : Union[str, Any] ): A = [ 'decoder.version', 'decoder.output_projection.weight', '_float_tensor', 'decoder.embed_positions._float_tensor', ] for k in ignore_keys: state_dict.pop(snake_case__ , snake_case__ ) def _snake_case ( snake_case__ : Union[str, Any] ): A , A = emb.weight.shape A = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ ) A = emb.weight.data return lin_layer def _snake_case ( snake_case__ : Tuple ): A = torch.load(snake_case__ , map_location='cpu' ) A = Namespace(**checkpoint['cfg']['model'] ) A = checkpoint['model'] remove_ignore_keys_(snake_case__ ) A = state_dict['decoder.embed_tokens.weight'].shape[0] A = {key.replace('decoder' , 'model' ): val for key, val in state_dict.items()} A = XGLMConfig( vocab_size=snake_case__ , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='gelu' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , ) A = XGLMForCausalLM(snake_case__ ) A = model.load_state_dict(snake_case__ , strict=snake_case__ ) print(snake_case__ ) A = make_linear_from_emb(model.model.embed_tokens ) return model if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''') parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') _lowercase = parser.parse_args() _lowercase = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
91
"""simple docstring""" import copy import re class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: str = '''hp''' _lowerCamelCase: List[Any] = {} _lowerCamelCase: List[Any] = None @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,A_ : List[str] ,A_ : Optional[Any] ) -> Tuple: A = prefix A = defaults cls.build_naming_info() @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : Any ,A_ : List[Any] ) -> int: if len(A_ ) == 0: return "" A = None if any(char.isdigit() for char in word ): raise Exception(F'Parameters should not contain numbers: \'{word}\' contains a number' ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 ,len(A_ ) + 1 ): A = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: A = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(A_ : Optional[Any] ): A = '' while integer != 0: A = chr(ord('A' ) + integer % 10 ) + s integer //= 10 return s A = 0 while True: A = word + '#' + int_to_alphabetic(A_ ) if sword in info["reverse_short_word"]: continue else: A = sword break A = short_word A = word return short_word @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : List[Any] ,A_ : Union[str, Any] ) -> Union[str, Any]: A = param_name.split('_' ) A = [TrialShortNamer.shortname_for_word(A_ ,A_ ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name A = ['', '_'] for separator in separators: A = separator.join(A_ ) if shortname not in info["reverse_short_param"]: A = shortname A = param_name return shortname return param_name @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : List[Any] ,A_ : Any ) -> Tuple: A = TrialShortNamer.shortname_for_key(A_ ,A_ ) A = short_name A = param_name @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict ) -> List[Any]: if cls.NAMING_INFO is not None: return A = { 'short_word': {}, 'reverse_short_word': {}, 'short_param': {}, 'reverse_short_param': {}, } A = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(A_ ,A_ ) A = info @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,A_ : Union[str, Any] ) -> Union[str, Any]: cls.build_naming_info() assert cls.PREFIX is not None A = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(F'You should provide a default value for the param name {k} with value {v}' ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue A = cls.NAMING_INFO['short_param'][k] if isinstance(A_ ,A_ ): A = 1 if v else 0 A = '' if isinstance(A_ ,(int, float) ) else '-' A = F'{key}{sep}{v}' name.append(A_ ) return "_".join(A_ ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[str] ,A_ : Any ) -> int: A = repr[len(cls.PREFIX ) + 1 :] if repr == "": A = [] else: A = repr.split('_' ) A = {} for value in values: if "-" in value: A , A = value.split('-' ) else: A = re.sub('[0-9.]' ,'' ,A_ ) A = float(re.sub('[^0-9.]' ,'' ,A_ ) ) A = cls.NAMING_INFO['reverse_short_param'][p_k] A = p_v for k in cls.DEFAULTS: if k not in parameters: A = cls.DEFAULTS[k] return parameters
91
1
"""simple docstring""" import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version('''>=''', FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType _lowercase = get_logger(__name__) def _snake_case ( snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : int , snake_case__ : str=0 ): os.makedirs(snake_case__ , exist_ok=snake_case__ ) with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): A = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: A = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin' A = os.path.join(snake_case__ , snake_case__ ) if accelerator.process_index == 0: logger.info(F'Saving model to {output_model_file}' ) torch.save(snake_case__ , snake_case__ ) logger.info(F'Model saved to {output_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: A = ( F'{MODEL_NAME}_rank{accelerator.process_index}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Saving model to {output_model_file}' ) torch.save(snake_case__ , snake_case__ ) logger.info(F'Model saved to {output_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: A = os.path.join(snake_case__ , F'{MODEL_NAME}_{model_index}' ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) logger.info(F'Saving model to {ckpt_dir}' ) A = {'model': state_dict} dist_cp.save_state_dict( state_dict=snake_case__ , storage_writer=dist_cp.FileSystemWriter(snake_case__ ) , planner=DefaultSavePlanner() , ) logger.info(F'Model saved to {ckpt_dir}' ) def _snake_case ( snake_case__ : int , snake_case__ : List[str] , snake_case__ : str , snake_case__ : str , snake_case__ : Any=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(snake_case__ ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( 'Set the `sync_module_states` flag to `True` so that model states are synced across processes when ' 'initializing FSDP object' ) return A = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin' A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Loading model from {input_model_file}' ) A = torch.load(snake_case__ ) logger.info(F'Model loaded from {input_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: A = ( F'{MODEL_NAME}_rank{accelerator.process_index}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Loading model from {input_model_file}' ) A = torch.load(snake_case__ ) logger.info(F'Model loaded from {input_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: A = ( os.path.join(snake_case__ , F'{MODEL_NAME}_{model_index}' ) if F'{MODEL_NAME}' not in input_dir else input_dir ) logger.info(F'Loading model from {ckpt_dir}' ) A = {'model': model.state_dict()} dist_cp.load_state_dict( state_dict=snake_case__ , storage_reader=dist_cp.FileSystemReader(snake_case__ ) , planner=DefaultLoadPlanner() , ) A = state_dict['model'] logger.info(F'Model loaded from {ckpt_dir}' ) model.load_state_dict(snake_case__ ) def _snake_case ( snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : Any=0 ): os.makedirs(snake_case__ , exist_ok=snake_case__ ) with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): A = FSDP.optim_state_dict(snake_case__ , snake_case__ ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: A = ( F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Saving Optimizer state to {output_optimizer_file}' ) torch.save(snake_case__ , snake_case__ ) logger.info(F'Optimizer state saved in {output_optimizer_file}' ) else: A = os.path.join(snake_case__ , F'{OPTIMIZER_NAME}_{optimizer_index}' ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) logger.info(F'Saving Optimizer state to {ckpt_dir}' ) dist_cp.save_state_dict( state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(snake_case__ ) , planner=DefaultSavePlanner() , ) logger.info(F'Optimizer state saved in {ckpt_dir}' ) def _snake_case ( snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Optional[int]=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: A = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: A = ( F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Loading Optimizer state from {input_optimizer_file}' ) A = torch.load(snake_case__ ) logger.info(F'Optimizer state loaded from {input_optimizer_file}' ) else: A = ( os.path.join(snake_case__ , F'{OPTIMIZER_NAME}_{optimizer_index}' ) if F'{OPTIMIZER_NAME}' not in input_dir else input_dir ) logger.info(F'Loading Optimizer from {ckpt_dir}' ) A = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(snake_case__ ) , ) A = optim_state['optimizer'] logger.info(F'Optimizer loaded from {ckpt_dir}' ) A = FSDP.optim_state_dict_to_load(snake_case__ , snake_case__ , snake_case__ ) optimizer.load_state_dict(snake_case__ )
91
"""simple docstring""" import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def _snake_case ( ): with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(snake_case__ ): requests.request('GET' , 'https://huggingface.co' ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request('GET' , 'https://huggingface.co' , timeout=1.0 ) @pytest.mark.integration def _snake_case ( ): with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request('GET' , 'https://huggingface.co' ) def _snake_case ( ): with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(snake_case__ ): http_head('https://huggingface.co' )
91
1
"""simple docstring""" from __future__ import annotations import numpy as np def _snake_case ( snake_case__ : np.ndarray ): A , A = np.shape(snake_case__ ) if rows != columns: A = ( '\'table\' has to be of square shaped array but got a ' F'{rows}x{columns} array:\n{table}' ) raise ValueError(snake_case__ ) A = np.zeros((rows, columns) ) A = np.zeros((rows, columns) ) for i in range(snake_case__ ): for j in range(snake_case__ ): A = sum(lower[i][k] * upper[k][j] for k in range(snake_case__ ) ) if upper[j][j] == 0: raise ArithmeticError('No LU decomposition exists' ) A = (table[i][j] - total) / upper[j][j] A = 1 for j in range(snake_case__ , snake_case__ ): A = sum(lower[i][k] * upper[k][j] for k in range(snake_case__ ) ) A = table[i][j] - total return lower, upper if __name__ == "__main__": import doctest doctest.testmod()
91
"""simple docstring""" import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: List[str] = BioGptTokenizer _lowerCamelCase: Tuple = False def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>', ] A = dict(zip(A_ ,range(len(A_ ) ) ) ) A = ['l o 123', 'lo w 1456', 'e r</w> 1789', ''] A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ) as fp: fp.write(json.dumps(A_ ) ) with open(self.merges_file ,'w' ) as fp: fp.write('\n'.join(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Tuple ) -> int: A = 'lower newer' A = 'lower newer' return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = BioGptTokenizer(self.vocab_file ,self.merges_file ) A = 'lower' A = ['low', 'er</w>'] A = tokenizer.tokenize(A_ ) self.assertListEqual(A_ ,A_ ) A = tokens + ['<unk>'] A = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) ,A_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: A = BioGptTokenizer.from_pretrained('microsoft/biogpt' ) A = tokenizer.encode('sequence builders' ,add_special_tokens=A_ ) A = tokenizer.encode('multi-sequence build' ,add_special_tokens=A_ ) A = tokenizer.build_inputs_with_special_tokens(A_ ) A = tokenizer.build_inputs_with_special_tokens(A_ ,A_ ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
91
1
"""simple docstring""" from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
91
"""simple docstring""" # HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. # # By default it'll use the lowest result as the base line to use as 100% and then compare the rest to # it as can be seen from the table above, but you can also specify which combination is the one to use as # the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0' # # --target-metric-key is there to tell the program which metrics to compare - the different metric keys are # inside output_dir/all_results.json. e.g., to measure eval performance instead of train use: # --target-metric-key eval_samples_per_second # but of course you will need to adjust the --base-cmd value in the example to perform evaluation as # well (as currently it doesn't) # import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers _lowercase = float('''nan''') class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[str] ,A_ : Tuple ) -> Any: A = sys.stdout A = open(A_ ,'a' ) def __getattr__( self : int ,A_ : Optional[Any] ) -> Tuple: return getattr(self.stdout ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ) -> str: self.stdout.write(A_ ) # strip tqdm codes self.file.write(re.sub(R'^.*\r' ,'' ,A_ ,0 ,re.M ) ) def _snake_case ( snake_case__ : Optional[Any]=80 , snake_case__ : List[str]=False ): A = [] # deal with critical env vars A = ['CUDA_VISIBLE_DEVICES'] for key in env_keys: A = os.environ.get(snake_case__ , snake_case__ ) if val is not None: cmd.append(F'{key}={val}' ) # python executable (not always needed if the script is executable) A = sys.executable if full_python_path else sys.executable.split('/' )[-1] cmd.append(snake_case__ ) # now the normal args cmd += list(map(shlex.quote , sys.argv ) ) # split up into up to MAX_WIDTH lines with shell multi-line escapes A = [] A = '' while len(snake_case__ ) > 0: current_line += F'{cmd.pop(0 )} ' if len(snake_case__ ) == 0 or len(snake_case__ ) + len(cmd[0] ) + 1 > max_width - 1: lines.append(snake_case__ ) A = '' return "\\\n".join(snake_case__ ) def _snake_case ( snake_case__ : str , snake_case__ : str ): # unwrap multi-line input A = re.sub(r'[\\\n]+' , ' ' , args.base_cmd ) # remove --output_dir if any and set our own A = re.sub('--output_dir\s+[^\s]+' , '' , args.base_cmd ) args.base_cmd += F' --output_dir {output_dir}' # ensure we have --overwrite_output_dir A = re.sub('--overwrite_output_dir\s+' , '' , args.base_cmd ) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd ) def _snake_case ( snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : Any , snake_case__ : List[Any] ): # Enable to debug everything but the run itself, to do it fast and see the progress. # This is useful for debugging the output formatting quickly - we can remove it later once # everybody is happy with the output if 0: import random from time import sleep sleep(0 ) return dict( {k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222] )} , ) A = subprocess.run(snake_case__ , capture_output=snake_case__ , text=snake_case__ ) if verbose: print('STDOUT' , result.stdout ) print('STDERR' , result.stderr ) # save the streams A = variation.replace(' ' , '-' ) with open(Path(snake_case__ ) / F'log.{prefix}.stdout.txt' , 'w' ) as f: f.write(result.stdout ) with open(Path(snake_case__ ) / F'log.{prefix}.stderr.txt' , 'w' ) as f: f.write(result.stderr ) if result.returncode != 0: if verbose: print('failed' ) return {target_metric_key: nan} with io.open(F'{output_dir}/all_results.json' , 'r' , encoding='utf-8' ) as f: A = json.load(snake_case__ ) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def _snake_case ( snake_case__ : str , snake_case__ : str , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : Optional[Any] , ): A = [] A = [] A = F'{id}: {variation:<{longest_variation_len}}' A = F'{preamble}: ' A = set(report_metric_keys + [target_metric_key] ) for i in tqdm(range(snake_case__ ) , desc=snake_case__ , leave=snake_case__ ): A = process_run_single( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) A = single_run_metrics[target_metric_key] if not math.isnan(snake_case__ ): metrics.append(snake_case__ ) results.append(snake_case__ ) outcome += "✓" else: outcome += "✘" A = F'\33[2K\r{outcome}' if len(snake_case__ ) > 0: A = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()} A = round(mean_metrics[target_metric_key] , 2 ) A = F'{outcome} {mean_target}' if len(snake_case__ ) > 1: results_str += F' {tuple(round(snake_case__ , 2 ) for x in results )}' print(snake_case__ ) A = variation return mean_metrics else: print(snake_case__ ) return {variation_key: variation, target_metric_key: nan} def _snake_case ( ): A = torch.cuda.get_device_properties(torch.device('cuda' ) ) return F'\nDatetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n' def _snake_case ( snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Union[str, Any] ): A = pd.DataFrame(snake_case__ ) A = 'variation' A = 'diff_%' A = nan if base_variation is not None and len(df[df[variation_key] == base_variation] ): # this may still return nan A = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(snake_case__ ): # as a fallback, use the minimal value as the sentinel A = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(snake_case__ ): A = df.apply( lambda snake_case__ : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value ) if not math.isnan(r[target_metric_key] ) else 0 , axis='columns' , ) # re-order columns A = [variation_key, target_metric_key, diff_key, *report_metric_keys] A = df.reindex(snake_case__ , axis='columns' ) # reorder cols # capitalize A = df.rename(str.capitalize , axis='columns' ) # make the cols as narrow as possible A = df.rename(lambda snake_case__ : c.replace('_' , '<br>' ) , axis='columns' ) A = df.rename(lambda snake_case__ : c.replace('_' , '\n' ) , axis='columns' ) A = ['', 'Copy between the cut-here-lines and paste as is to github or a forum'] report += ["----------8<-----------------8<--------"] report += ["*** Results:", df_github.to_markdown(index=snake_case__ , floatfmt='.2f' )] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=snake_case__ , floatfmt='.2f' )] print('\n\n'.join(snake_case__ ) ) def _snake_case ( ): A = argparse.ArgumentParser() parser.add_argument( '--base-cmd' , default=snake_case__ , type=snake_case__ , required=snake_case__ , help='Base cmd' , ) parser.add_argument( '--variations' , default=snake_case__ , type=snake_case__ , nargs='+' , required=snake_case__ , help='Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'' , ) parser.add_argument( '--base-variation' , default=snake_case__ , type=snake_case__ , help='Baseline variation to compare to. if None the minimal target value will be used to compare against' , ) parser.add_argument( '--target-metric-key' , default=snake_case__ , type=snake_case__ , required=snake_case__ , help='Target metric key in output_dir/all_results.json, e.g., train_samples_per_second' , ) parser.add_argument( '--report-metric-keys' , default='' , type=snake_case__ , help='Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples' , ) parser.add_argument( '--repeat-times' , default=1 , type=snake_case__ , help='How many times to re-run each variation - an average will be reported' , ) parser.add_argument( '--output_dir' , default='output_benchmark' , type=snake_case__ , help='The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked' , ) parser.add_argument( '--verbose' , default=snake_case__ , action='store_true' , help='Whether to show the outputs of each run or just the benchmark progress' , ) A = parser.parse_args() A = args.output_dir Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) A = get_base_command(snake_case__ , snake_case__ ) # split each dimension into its --foo variations A = [list(map(str.strip , re.split(r'\|' , snake_case__ ) ) ) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty A = list(map(str.strip , map(' '.join , itertools.product(*snake_case__ ) ) ) ) A = max(len(snake_case__ ) for x in variations ) # split wanted keys A = args.report_metric_keys.split() # capture prints into a log file for convenience A = F'benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt' print(F'\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt' ) print(F'and this script\'s output is also piped into {report_fn}' ) A = Tee(snake_case__ ) print(F'\n*** Running {len(snake_case__ )} benchmarks:' ) print(F'Base command: {" ".join(snake_case__ )}' ) A = 'variation' A = [] for id, variation in enumerate(tqdm(snake_case__ , desc='Total completion: ' , leave=snake_case__ ) ): A = base_cmd + variation.split() results.append( process_run( id + 1 , snake_case__ , snake_case__ , snake_case__ , snake_case__ , args.target_metric_key , snake_case__ , args.repeat_times , snake_case__ , args.verbose , ) ) process_results(snake_case__ , args.target_metric_key , snake_case__ , args.base_variation , snake_case__ ) if __name__ == "__main__": main()
91
1
"""simple docstring""" import importlib.util import os import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import ( is_accelerate_available, is_flax_available, is_safetensors_available, is_tf_available, is_torch_available, ) from . import BaseTransformersCLICommand def _snake_case ( snake_case__ : Tuple ): return EnvironmentCommand() def _snake_case ( snake_case__ : List[str] ): return EnvironmentCommand(args.accelerate_config_file ) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ) -> Any: A = parser.add_parser('env' ) download_parser.set_defaults(func=A_ ) download_parser.add_argument( '--accelerate-config_file' ,default=A_ ,help='The accelerate config file to use for the default values in the launching script.' ,) download_parser.set_defaults(func=A_ ) def __init__( self : List[Any] ,A_ : Optional[int] ,*A_ : Tuple ) -> None: A = accelerate_config_file def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: A = 'not installed' if is_safetensors_available(): import safetensors A = safetensors.__version__ elif importlib.util.find_spec('safetensors' ) is not None: import safetensors A = F'{safetensors.__version__} but is ignored because of PyTorch version too old.' A = 'not installed' A = A = 'not found' if is_accelerate_available(): import accelerate from accelerate.commands.config import default_config_file, load_config_from_file A = accelerate.__version__ # Get the default from the config file. if self._accelerate_config_file is not None or os.path.isfile(A_ ): A = load_config_from_file(self._accelerate_config_file ).to_dict() A = ( '\n'.join([F'\t- {prop}: {val}' for prop, val in accelerate_config.items()] ) if isinstance(A_ ,A_ ) else F'\t{accelerate_config}' ) A = 'not installed' A = 'NA' if is_torch_available(): import torch A = torch.__version__ A = torch.cuda.is_available() A = 'not installed' A = 'NA' if is_tf_available(): import tensorflow as tf A = tf.__version__ try: # deprecated in v2.1 A = tf.test.is_gpu_available() except AttributeError: # returns list of devices, convert to bool A = bool(tf.config.list_physical_devices('GPU' ) ) A = 'not installed' A = 'not installed' A = 'not installed' A = 'NA' if is_flax_available(): import flax import jax import jaxlib A = flax.__version__ A = jax.__version__ A = jaxlib.__version__ A = jax.lib.xla_bridge.get_backend().platform A = { '`transformers` version': version, 'Platform': platform.platform(), 'Python version': platform.python_version(), 'Huggingface_hub version': huggingface_hub.__version__, 'Safetensors version': F'{safetensors_version}', 'Accelerate version': F'{accelerate_version}', 'Accelerate config': F'{accelerate_config_str}', 'PyTorch version (GPU?)': F'{pt_version} ({pt_cuda_available})', 'Tensorflow version (GPU?)': F'{tf_version} ({tf_cuda_available})', 'Flax version (CPU?/GPU?/TPU?)': F'{flax_version} ({jax_backend})', 'Jax version': F'{jax_version}', 'JaxLib version': F'{jaxlib_version}', 'Using GPU in script?': '<fill in>', 'Using distributed or parallel set-up in script?': '<fill in>', } print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' ) print(self.format_dict(A_ ) ) return info @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : Union[str, Any] ) -> Dict: return "\n".join([F'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
91
"""simple docstring""" import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version('''>=''', FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType _lowercase = get_logger(__name__) def _snake_case ( snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : int , snake_case__ : str=0 ): os.makedirs(snake_case__ , exist_ok=snake_case__ ) with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): A = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: A = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin' A = os.path.join(snake_case__ , snake_case__ ) if accelerator.process_index == 0: logger.info(F'Saving model to {output_model_file}' ) torch.save(snake_case__ , snake_case__ ) logger.info(F'Model saved to {output_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: A = ( F'{MODEL_NAME}_rank{accelerator.process_index}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Saving model to {output_model_file}' ) torch.save(snake_case__ , snake_case__ ) logger.info(F'Model saved to {output_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: A = os.path.join(snake_case__ , F'{MODEL_NAME}_{model_index}' ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) logger.info(F'Saving model to {ckpt_dir}' ) A = {'model': state_dict} dist_cp.save_state_dict( state_dict=snake_case__ , storage_writer=dist_cp.FileSystemWriter(snake_case__ ) , planner=DefaultSavePlanner() , ) logger.info(F'Model saved to {ckpt_dir}' ) def _snake_case ( snake_case__ : int , snake_case__ : List[str] , snake_case__ : str , snake_case__ : str , snake_case__ : Any=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(snake_case__ ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( 'Set the `sync_module_states` flag to `True` so that model states are synced across processes when ' 'initializing FSDP object' ) return A = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin' A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Loading model from {input_model_file}' ) A = torch.load(snake_case__ ) logger.info(F'Model loaded from {input_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: A = ( F'{MODEL_NAME}_rank{accelerator.process_index}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Loading model from {input_model_file}' ) A = torch.load(snake_case__ ) logger.info(F'Model loaded from {input_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: A = ( os.path.join(snake_case__ , F'{MODEL_NAME}_{model_index}' ) if F'{MODEL_NAME}' not in input_dir else input_dir ) logger.info(F'Loading model from {ckpt_dir}' ) A = {'model': model.state_dict()} dist_cp.load_state_dict( state_dict=snake_case__ , storage_reader=dist_cp.FileSystemReader(snake_case__ ) , planner=DefaultLoadPlanner() , ) A = state_dict['model'] logger.info(F'Model loaded from {ckpt_dir}' ) model.load_state_dict(snake_case__ ) def _snake_case ( snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : Any=0 ): os.makedirs(snake_case__ , exist_ok=snake_case__ ) with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): A = FSDP.optim_state_dict(snake_case__ , snake_case__ ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: A = ( F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Saving Optimizer state to {output_optimizer_file}' ) torch.save(snake_case__ , snake_case__ ) logger.info(F'Optimizer state saved in {output_optimizer_file}' ) else: A = os.path.join(snake_case__ , F'{OPTIMIZER_NAME}_{optimizer_index}' ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) logger.info(F'Saving Optimizer state to {ckpt_dir}' ) dist_cp.save_state_dict( state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(snake_case__ ) , planner=DefaultSavePlanner() , ) logger.info(F'Optimizer state saved in {ckpt_dir}' ) def _snake_case ( snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Optional[int]=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: A = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: A = ( F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Loading Optimizer state from {input_optimizer_file}' ) A = torch.load(snake_case__ ) logger.info(F'Optimizer state loaded from {input_optimizer_file}' ) else: A = ( os.path.join(snake_case__ , F'{OPTIMIZER_NAME}_{optimizer_index}' ) if F'{OPTIMIZER_NAME}' not in input_dir else input_dir ) logger.info(F'Loading Optimizer from {ckpt_dir}' ) A = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(snake_case__ ) , ) A = optim_state['optimizer'] logger.info(F'Optimizer loaded from {ckpt_dir}' ) A = FSDP.optim_state_dict_to_load(snake_case__ , snake_case__ , snake_case__ ) optimizer.load_state_dict(snake_case__ )
91
1
"""simple docstring""" import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ,A_ : Optional[int] ) -> Optional[int]: return F'gaussian_noise_s={seed}_shape={"_".join([str(A_ ) for s in shape] )}.npy' def _SCREAMING_SNAKE_CASE ( self : str ) -> Any: # clean up the VRAM after each test super().tearDown() gc.collect() def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Dict=0 ,A_ : Tuple=(4, 4, 64, 64) ,A_ : Optional[int]=False ) -> Any: A = jnp.bfloataa if fpaa else jnp.floataa A = jnp.array(load_hf_numpy(self.get_file_format(A_ ,A_ ) ) ,dtype=A_ ) return image def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any]=False ,A_ : Optional[int]="CompVis/stable-diffusion-v1-4" ) -> str: A = jnp.bfloataa if fpaa else jnp.floataa A = 'bf16' if fpaa else None A , A = FlaxUNetaDConditionModel.from_pretrained( A_ ,subfolder='unet' ,dtype=A_ ,revision=A_ ) return model, params def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Tuple=0 ,A_ : Optional[Any]=(4, 77, 768) ,A_ : List[str]=False ) -> Optional[int]: A = jnp.bfloataa if fpaa else jnp.floataa A = jnp.array(load_hf_numpy(self.get_file_format(A_ ,A_ ) ) ,dtype=A_ ) return hidden_states @parameterized.expand( [ # fmt: off [83, 4, [-0.23_23, -0.13_04, 0.08_13, -0.30_93, -0.09_19, -0.15_71, -0.11_25, -0.58_06]], [17, 0.55, [-0.08_31, -0.24_43, 0.09_01, -0.09_19, 0.33_96, 0.01_03, -0.37_43, 0.07_01]], [8, 0.89, [-0.48_63, 0.08_59, 0.08_75, -0.16_58, 0.91_99, -0.01_14, 0.48_39, 0.46_39]], [3, 1000, [-0.56_49, 0.24_02, -0.55_18, 0.12_48, 1.13_28, -0.24_43, -0.03_25, -1.00_78]], # fmt: on ] ) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Union[str, Any] ,A_ : Any ,A_ : List[Any] ) -> Tuple: A , A = self.get_unet_model(model_id='CompVis/stable-diffusion-v1-4' ,fpaa=A_ ) A = self.get_latents(A_ ,fpaa=A_ ) A = self.get_encoder_hidden_states(A_ ,fpaa=A_ ) A = model.apply( {'params': params} ,A_ ,jnp.array(A_ ,dtype=jnp.intaa ) ,encoder_hidden_states=A_ ,).sample assert sample.shape == latents.shape A = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) ,dtype=jnp.floataa ) A = jnp.array(A_ ,dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(A_ ,A_ ,atol=1e-2 ) @parameterized.expand( [ # fmt: off [83, 4, [0.15_14, 0.08_07, 0.16_24, 0.10_16, -0.18_96, 0.02_63, 0.06_77, 0.23_10]], [17, 0.55, [0.11_64, -0.02_16, 0.01_70, 0.15_89, -0.31_20, 0.10_05, -0.05_81, -0.14_58]], [8, 0.89, [-0.17_58, -0.01_69, 0.10_04, -0.14_11, 0.13_12, 0.11_03, -0.19_96, 0.21_39]], [3, 1000, [0.12_14, 0.03_52, -0.07_31, -0.15_62, -0.09_94, -0.09_06, -0.23_40, -0.05_39]], # fmt: on ] ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[Any] ,A_ : Dict ,A_ : str ) -> str: A , A = self.get_unet_model(model_id='stabilityai/stable-diffusion-2' ,fpaa=A_ ) A = self.get_latents(A_ ,shape=(4, 4, 96, 96) ,fpaa=A_ ) A = self.get_encoder_hidden_states(A_ ,shape=(4, 77, 1024) ,fpaa=A_ ) A = model.apply( {'params': params} ,A_ ,jnp.array(A_ ,dtype=jnp.intaa ) ,encoder_hidden_states=A_ ,).sample assert sample.shape == latents.shape A = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) ,dtype=jnp.floataa ) A = jnp.array(A_ ,dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(A_ ,A_ ,atol=1e-2 )
91
"""simple docstring""" import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechTaHifiGan, SpeechTaHifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: str = AudioLDMPipeline _lowerCamelCase: Optional[int] = TEXT_TO_AUDIO_PARAMS _lowerCamelCase: Optional[int] = TEXT_TO_AUDIO_BATCH_PARAMS _lowerCamelCase: Optional[int] = frozenset( [ '''num_inference_steps''', '''num_waveforms_per_prompt''', '''generator''', '''latents''', '''output_type''', '''return_dict''', '''callback''', '''callback_steps''', ] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: torch.manual_seed(0 ) A = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=(32, 64) ,class_embed_type='simple_projection' ,projection_class_embeddings_input_dim=32 ,class_embeddings_concat=A_ ,) A = DDIMScheduler( beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule='scaled_linear' ,clip_sample=A_ ,set_alpha_to_one=A_ ,) torch.manual_seed(0 ) A = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=1 ,out_channels=1 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,) torch.manual_seed(0 ) A = ClapTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,projection_dim=32 ,) A = ClapTextModelWithProjection(A_ ) A = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' ,model_max_length=77 ) A = SpeechTaHifiGanConfig( model_in_dim=8 ,sampling_rate=1_6000 ,upsample_initial_channel=16 ,upsample_rates=[2, 2] ,upsample_kernel_sizes=[4, 4] ,resblock_kernel_sizes=[3, 7] ,resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] ,normalize_before=A_ ,) A = SpeechTaHifiGan(A_ ) A = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'vocoder': vocoder, } return components def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Any ,A_ : Dict=0 ) -> str: if str(A_ ).startswith('mps' ): A = torch.manual_seed(A_ ) else: A = torch.Generator(device=A_ ).manual_seed(A_ ) A = { 'prompt': 'A hammer hitting a wooden surface', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, } return inputs def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = audioldm_pipe(**A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) == 256 A = audio[:10] A = np.array( [-0.00_50, 0.00_50, -0.00_60, 0.00_33, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_33] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = 3 * [inputs['prompt']] # forward A = audioldm_pipe(**A_ ) A = output.audios[0] A = self.get_dummy_inputs(A_ ) A = 3 * [inputs.pop('prompt' )] A = audioldm_pipe.tokenizer( A_ ,padding='max_length' ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=A_ ,return_tensors='pt' ,) A = text_inputs['input_ids'].to(A_ ) A = audioldm_pipe.text_encoder( A_ ,) A = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state A = F.normalize(A_ ,dim=-1 ) A = prompt_embeds # forward A = audioldm_pipe(**A_ ) A = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any: A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = 3 * ['this is a negative prompt'] A = negative_prompt A = 3 * [inputs['prompt']] # forward A = audioldm_pipe(**A_ ) A = output.audios[0] A = self.get_dummy_inputs(A_ ) A = 3 * [inputs.pop('prompt' )] A = [] for p in [prompt, negative_prompt]: A = audioldm_pipe.tokenizer( A_ ,padding='max_length' ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=A_ ,return_tensors='pt' ,) A = text_inputs['input_ids'].to(A_ ) A = audioldm_pipe.text_encoder( A_ ,) A = text_embeds.text_embeds # additional L_2 normalization over each hidden-state A = F.normalize(A_ ,dim=-1 ) embeds.append(A_ ) A , A = embeds # forward A = audioldm_pipe(**A_ ) A = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : str ) -> int: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = PNDMScheduler(skip_prk_steps=A_ ) A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = 'egg cracking' A = audioldm_pipe(**A_ ,negative_prompt=A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) == 256 A = audio[:10] A = np.array( [-0.00_51, 0.00_50, -0.00_60, 0.00_34, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_32] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = PNDMScheduler(skip_prk_steps=A_ ) A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = 'A hammer hitting a wooden surface' # test num_waveforms_per_prompt=1 (default) A = audioldm_pipe(A_ ,num_inference_steps=2 ).audios assert audios.shape == (1, 256) # test num_waveforms_per_prompt=1 (default) for batch of prompts A = 2 A = audioldm_pipe([prompt] * batch_size ,num_inference_steps=2 ).audios assert audios.shape == (batch_size, 256) # test num_waveforms_per_prompt for single prompt A = 2 A = audioldm_pipe(A_ ,num_inference_steps=2 ,num_waveforms_per_prompt=A_ ).audios assert audios.shape == (num_waveforms_per_prompt, 256) # test num_waveforms_per_prompt for batch of prompts A = 2 A = audioldm_pipe( [prompt] * batch_size ,num_inference_steps=2 ,num_waveforms_per_prompt=A_ ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = audioldm_pipe.vocoder.config.sampling_rate A = self.get_dummy_inputs(A_ ) A = audioldm_pipe(audio_length_in_s=0.0_16 ,**A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) / vocoder_sampling_rate == 0.0_16 A = audioldm_pipe(audio_length_in_s=0.0_32 ,**A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) / vocoder_sampling_rate == 0.0_32 def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]: A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = ['hey'] A = audioldm_pipe(A_ ,num_inference_steps=1 ) A = output.audios.shape assert audio_shape == (1, 256) A = audioldm_pipe.vocoder.config config.model_in_dim *= 2 A = SpeechTaHifiGan(A_ ).to(A_ ) A = audioldm_pipe(A_ ,num_inference_steps=1 ) A = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 256) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: self._test_attention_slicing_forward_pass(test_mean_pixel_difference=A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: self._test_inference_batch_single_identical(test_mean_pixel_difference=A_ ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,) def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A_ ) @slow class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]: super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : List[Any] ,A_ : str="cpu" ,A_ : List[str]=torch.floataa ,A_ : str=0 ) -> List[Any]: A = torch.Generator(device=A_ ).manual_seed(A_ ) A = np.random.RandomState(A_ ).standard_normal((1, 8, 128, 16) ) A = torch.from_numpy(A_ ).to(device=A_ ,dtype=A_ ) A = { 'prompt': 'A hammer hitting a wooden surface', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 2.5, } return inputs def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: A = AudioLDMPipeline.from_pretrained('cvssp/audioldm' ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_inputs(A_ ) A = 25 A = audioldm_pipe(**A_ ).audios[0] assert audio.ndim == 1 assert len(A_ ) == 8_1920 A = audio[7_7230:7_7240] A = np.array( [-0.48_84, -0.46_07, 0.00_23, 0.50_07, 0.58_96, 0.51_51, 0.38_13, -0.02_08, -0.36_87, -0.43_15] ) A = np.abs(expected_slice - audio_slice ).max() assert max_diff < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]: A = AudioLDMPipeline.from_pretrained('cvssp/audioldm' ) A = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_inputs(A_ ) A = audioldm_pipe(**A_ ).audios[0] assert audio.ndim == 1 assert len(A_ ) == 8_1920 A = audio[2_7780:2_7790] A = np.array([-0.21_31, -0.08_73, -0.01_24, -0.01_89, 0.05_69, 0.13_73, 0.18_83, 0.28_86, 0.32_97, 0.22_12] ) A = np.abs(expected_slice - audio_slice ).max() assert max_diff < 3e-2
91
1
"""simple docstring""" import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : Union[str, Any] ,A_ : List[Any] ,A_ : List[str]=13 ,A_ : Union[str, Any]=7 ,A_ : Dict=True ,A_ : List[str]=True ,A_ : str=False ,A_ : Any=True ,A_ : Any=99 ,A_ : List[Any]=32 ,A_ : int=5 ,A_ : Tuple=4 ,A_ : Optional[Any]=37 ,A_ : Optional[Any]="gelu" ,A_ : str=0.1 ,A_ : Optional[Any]=0.1 ,A_ : Union[str, Any]=512 ,A_ : Any=16 ,A_ : Dict=2 ,A_ : str=0.02 ,A_ : Union[str, Any]=3 ,A_ : Tuple=4 ,A_ : Any=None ,) -> Optional[int]: A = parent A = batch_size A = seq_length A = is_training A = use_input_mask A = use_token_type_ids A = use_labels A = vocab_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_vocab_size A = type_sequence_label_size A = initializer_range A = num_labels A = num_choices A = scope def _SCREAMING_SNAKE_CASE ( self : Any ) -> int: A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A = None if self.use_input_mask: A = random_attention_mask([self.batch_size, self.seq_length] ) A = None A = None A = None if self.use_labels: A = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A = ids_tensor([self.batch_size] ,self.num_choices ) A = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: return DistilBertConfig( vocab_size=self.vocab_size ,dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,hidden_dim=self.intermediate_size ,hidden_act=self.hidden_act ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Dict ,A_ : Optional[int] ,A_ : Union[str, Any] ,A_ : Union[str, Any] ,A_ : str ,A_ : Optional[Any] ) -> Optional[Any]: A = DistilBertModel(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ,A_ ) A = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Tuple ,A_ : Optional[int] ,A_ : Dict ,A_ : int ,A_ : List[Any] ,A_ : int ) -> List[Any]: A = DistilBertForMaskedLM(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Optional[int] ,A_ : Dict ,A_ : int ,A_ : int ,A_ : str ,A_ : Any ) -> Optional[Any]: A = DistilBertForQuestionAnswering(config=A_ ) model.to(A_ ) model.eval() A = model( A_ ,attention_mask=A_ ,start_positions=A_ ,end_positions=A_ ) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Union[str, Any] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : int ,A_ : List[Any] ,A_ : Optional[Any] ) -> Optional[Any]: A = self.num_labels A = DistilBertForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : List[str] ,A_ : Tuple ,A_ : List[str] ,A_ : str ,A_ : Union[str, Any] ,A_ : List[str] ) -> Optional[Any]: A = self.num_labels A = DistilBertForTokenClassification(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Union[str, Any] ,A_ : List[Any] ,A_ : Union[str, Any] ,A_ : int ,A_ : List[str] ,A_ : Tuple ) -> Optional[int]: A = self.num_choices A = DistilBertForMultipleChoice(config=A_ ) model.to(A_ ) model.eval() A = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() A = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() A = model( A_ ,attention_mask=A_ ,labels=A_ ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: A = self.prepare_config_and_inputs() ((A) , (A) , (A) , (A) , (A) , (A)) = config_and_inputs A = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: List[str] = ( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) _lowerCamelCase: Optional[Any] = ( { '''feature-extraction''': DistilBertModel, '''fill-mask''': DistilBertForMaskedLM, '''question-answering''': DistilBertForQuestionAnswering, '''text-classification''': DistilBertForSequenceClassification, '''token-classification''': DistilBertForTokenClassification, '''zero-shot''': DistilBertForSequenceClassification, } if is_torch_available() else {} ) _lowerCamelCase: Dict = True _lowerCamelCase: Dict = True _lowerCamelCase: List[str] = True _lowerCamelCase: Any = True def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]: A = DistilBertModelTester(self ) A = ConfigTester(self ,config_class=A_ ,dim=37 ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*A_ ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*A_ ) @slow def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = DistilBertModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) @slow @require_torch_gpu def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. if model_class == DistilBertForMultipleChoice: return A = True A = model_class(config=A_ ) A = self._prepare_for_class(A_ ,A_ ) A = torch.jit.trace( A_ ,(inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(A_ ,os.path.join(A_ ,'traced_model.pt' ) ) A = torch.jit.load(os.path.join(A_ ,'traced_model.pt' ) ,map_location=A_ ) loaded(inputs_dict['input_ids'].to(A_ ) ,inputs_dict['attention_mask'].to(A_ ) ) @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: A = DistilBertModel.from_pretrained('distilbert-base-uncased' ) A = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) A = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): A = model(A_ ,attention_mask=A_ )[0] A = torch.Size((1, 11, 768) ) self.assertEqual(output.shape ,A_ ) A = torch.tensor( [[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,A_ ,atol=1e-4 ) )
91
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase = { '''configuration_xlm_roberta_xl''': [ '''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMRobertaXLConfig''', '''XLMRobertaXLOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XLMRobertaXLForCausalLM''', '''XLMRobertaXLForMaskedLM''', '''XLMRobertaXLForMultipleChoice''', '''XLMRobertaXLForQuestionAnswering''', '''XLMRobertaXLForSequenceClassification''', '''XLMRobertaXLForTokenClassification''', '''XLMRobertaXLModel''', '''XLMRobertaXLPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaXLConfig, XLMRobertaXLOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaXLForCausalLM, XLMRobertaXLForMaskedLM, XLMRobertaXLForMultipleChoice, XLMRobertaXLForQuestionAnswering, XLMRobertaXLForSequenceClassification, XLMRobertaXLForTokenClassification, XLMRobertaXLModel, XLMRobertaXLPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
91
1
"""simple docstring""" import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int _lowercase = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class lowerCAmelCase_ ( datasets.BuilderConfig ): '''simple docstring''' _lowerCamelCase: Optional[datasets.Features] = None def _snake_case ( snake_case__ : "pyspark.sql.DataFrame" , snake_case__ : List[int] , ): import pyspark def generate_fn(): A = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) ) for partition_id in partition_order: A = df_with_partition_id.select('*' ).where(F'part_id = {partition_id}' ).drop('part_id' ) A = partition_df.collect() A = 0 for row in rows: yield F'{partition_id}_{row_id}', row.asDict() row_id += 1 return generate_fn class lowerCAmelCase_ ( _BaseExamplesIterable ): '''simple docstring''' def __init__( self : str ,A_ : "pyspark.sql.DataFrame" ,A_ : Optional[int]=None ,) -> str: A = df A = partition_order or range(self.df.rdd.getNumPartitions() ) A = _generate_iterable_examples(self.df ,self.partition_order ) def __iter__( self : Dict ) -> Dict: yield from self.generate_examples_fn() def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : np.random.Generator ) -> "SparkExamplesIterable": A = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(A_ ) return SparkExamplesIterable(self.df ,partition_order=A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : int ,A_ : int ) -> "SparkExamplesIterable": A = self.split_shard_indices_by_worker(A_ ,A_ ) return SparkExamplesIterable(self.df ,partition_order=A_ ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int: return len(self.partition_order ) class lowerCAmelCase_ ( datasets.DatasetBuilder ): '''simple docstring''' _lowerCamelCase: Any = SparkConfig def __init__( self : Union[str, Any] ,A_ : "pyspark.sql.DataFrame" ,A_ : str = None ,A_ : str = None ,**A_ : Union[str, Any] ,) -> Union[str, Any]: import pyspark A = pyspark.sql.SparkSession.builder.getOrCreate() A = df A = working_dir super().__init__( cache_dir=A_ ,config_name=str(self.df.semanticHash() ) ,**A_ ,) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: # Returns the path of the created file. def create_cache_and_write_probe(A_ : int ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir ,exist_ok=A_ ) A = os.path.join(self._cache_dir ,'fs_test' + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(A_ ,'a' ) return [probe_file] if self._spark.conf.get('spark.master' ,'' ).startswith('local' ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. if self._cache_dir: A = ( self._spark.sparkContext.parallelize(range(1 ) ,1 ).mapPartitions(A_ ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( 'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int: return datasets.DatasetInfo(features=self.config.features ) def _SCREAMING_SNAKE_CASE ( self : str ,A_ : datasets.download.download_manager.DownloadManager ) -> Any: return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Optional[Any] ) -> List[str]: import pyspark def get_arrow_batch_size(A_ : Tuple ): for batch in it: yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} ) A = self.df.count() A = df_num_rows if df_num_rows <= 100 else 100 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. A = ( self.df.limit(A_ ) .repartition(1 ) .mapInArrow(A_ ,'batch_bytes: long' ) .agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) ) .collect()[0] .sample_bytes / sample_num_rows ) A = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. A = min(A_ ,int(approx_total_size / max_shard_size ) ) A = self.df.repartition(A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : str ,A_ : str ,A_ : int ,) -> Iterable[Tuple[int, bool, Union[int, tuple]]]: import pyspark A = ParquetWriter if file_format == 'parquet' else ArrowWriter A = os.path.join(self._working_dir ,os.path.basename(A_ ) ) if self._working_dir else fpath A = file_format == 'parquet' # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. A = self.config.features A = self._writer_batch_size A = self._fs.storage_options def write_arrow(A_ : List[Any] ): # Within the same SparkContext, no two task attempts will share the same attempt ID. A = pyspark.TaskContext().taskAttemptId() A = next(A_ ,A_ ) if first_batch is None: # Some partitions might not receive any data. return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] ,names=['task_id', 'num_examples', 'num_bytes'] ,) A = 0 A = writer_class( features=A_ ,path=working_fpath.replace('SSSSS' ,F'{shard_id:05d}' ).replace('TTTTT' ,F'{task_id:05d}' ) ,writer_batch_size=A_ ,storage_options=A_ ,embed_local_files=A_ ,) A = pa.Table.from_batches([first_batch] ) writer.write_table(A_ ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: A , A = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] ,names=['task_id', 'num_examples', 'num_bytes'] ,) shard_id += 1 A = writer_class( features=writer._features ,path=working_fpath.replace('SSSSS' ,F'{shard_id:05d}' ).replace('TTTTT' ,F'{task_id:05d}' ) ,writer_batch_size=A_ ,storage_options=A_ ,embed_local_files=A_ ,) A = pa.Table.from_batches([batch] ) writer.write_table(A_ ) if writer._num_bytes > 0: A , A = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] ,names=['task_id', 'num_examples', 'num_bytes'] ,) if working_fpath != fpath: for file in os.listdir(os.path.dirname(A_ ) ): A = os.path.join(os.path.dirname(A_ ) ,os.path.basename(A_ ) ) shutil.move(A_ ,A_ ) A = ( self.df.mapInArrow(A_ ,'task_id: long, num_examples: long, num_bytes: long' ) .groupBy('task_id' ) .agg( pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ) ,pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ) ,pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ) ,pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ) ,) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : "datasets.SplitGenerator" ,A_ : str = "arrow" ,A_ : Optional[Union[str, int]] = None ,A_ : Optional[int] = None ,**A_ : List[str] ,) -> Union[str, Any]: self._validate_cache_dir() A = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(A_ ) A = not is_remote_filesystem(self._fs ) A = os.path.join if is_local else posixpath.join A = '-TTTTT-SSSSS-of-NNNNN' A = F'{self.name}-{split_generator.name}{SUFFIX}.{file_format}' A = path_join(self._output_dir ,A_ ) A = 0 A = 0 A = 0 A = [] A = [] for task_id, content in self._prepare_split_single(A_ ,A_ ,A_ ): ( ( A ) , ( A ) , ( A ) , ( A ) , ) = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(A_ ) A = total_num_examples A = total_num_bytes # should rename everything at the end logger.debug(F'Renaming {total_shards} shards.' ) if total_shards > 1: A = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. A = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( A_ : int ,A_ : int ,A_ : int ,): rename( A_ ,fpath.replace('SSSSS' ,F'{shard_id:05d}' ).replace('TTTTT' ,F'{task_id:05d}' ) ,fpath.replace('TTTTT-SSSSS' ,F'{global_shard_id:05d}' ).replace('NNNNN' ,F'{total_shards:05d}' ) ,) A = [] A = 0 for i in range(len(A_ ) ): A , A = task_id_and_num_shards[i] for shard_id in range(A_ ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(A_ ,len(A_ ) ).map(lambda A_ : _rename_shard(*A_ ) ).collect() else: # don't use any pattern A = 0 A = task_id_and_num_shards[0][0] self._rename( fpath.replace('SSSSS' ,F'{shard_id:05d}' ).replace('TTTTT' ,F'{task_id:05d}' ) ,fpath.replace(A_ ,'' ) ,) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : "datasets.SplitGenerator" ,) -> SparkExamplesIterable: return SparkExamplesIterable(self.df )
91
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.utils import ComputeEnvironment from .cluster import get_cluster_input from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 from .sagemaker import get_sagemaker_input _lowercase = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine''' def _snake_case ( ): A = _ask_options( 'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , ) if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: A = get_sagemaker_input() else: A = get_cluster_input() return config def _snake_case ( snake_case__ : Any=None ): if subparsers is not None: A = subparsers.add_parser('config' , description=snake_case__ ) else: A = argparse.ArgumentParser('Accelerate config command' , description=snake_case__ ) parser.add_argument( '--config_file' , default=snake_case__ , help=( 'The path to use to store the config file. Will default to a file named default_config.yaml in the cache ' 'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ' 'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ' 'with \'huggingface\'.' ) , ) if subparsers is not None: parser.set_defaults(func=snake_case__ ) return parser def _snake_case ( snake_case__ : Tuple ): A = get_user_input() if args.config_file is not None: A = args.config_file else: if not os.path.isdir(snake_case__ ): os.makedirs(snake_case__ ) A = default_yaml_config_file if config_file.endswith('.json' ): config.to_json_file(snake_case__ ) else: config.to_yaml_file(snake_case__ ) print(F'accelerate configuration saved at {config_file}' ) def _snake_case ( ): A = config_command_parser() A = parser.parse_args() config_command(snake_case__ ) if __name__ == "__main__": main()
91
1
"""simple docstring""" import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[Any] ,A_ : List[Any] ,A_ : Optional[int]=14 ,A_ : int=7 ,A_ : Dict=True ,A_ : str=True ,A_ : List[str]=False ,A_ : Optional[Any]=True ,A_ : int=99 ,A_ : List[str]=32 ,A_ : Optional[Any]=4 ,A_ : int=4 ,A_ : int=4 ,A_ : Dict=37 ,A_ : Tuple="gelu" ,A_ : Tuple=0.1 ,A_ : Union[str, Any]=0.1 ,A_ : Dict=512 ,A_ : Optional[int]=0.02 ,) -> Union[str, Any]: A = parent A = batch_size A = seq_length A = is_training A = use_input_mask A = use_token_type_ids A = use_labels A = vocab_size A = hidden_size A = rotary_dim A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = initializer_range A = None A = vocab_size - 1 A = vocab_size - 1 A = vocab_size - 1 def _SCREAMING_SNAKE_CASE ( self : int ) -> Any: A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A = None if self.use_input_mask: A = random_attention_mask([self.batch_size, self.seq_length] ) A = GPTJConfig( vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,use_cache=A_ ,bos_token_id=self.bos_token_id ,eos_token_id=self.eos_token_id ,pad_token_id=self.pad_token_id ,rotary_dim=self.rotary_dim ,) return (config, input_ids, input_mask) def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict: A = self.prepare_config_and_inputs() A , A , A = config_and_inputs A = {'input_ids': input_ids, 'attention_mask': attention_mask} return config, inputs_dict def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Optional[int] ,A_ : int ,A_ : Optional[int] ,A_ : List[str] ) -> List[Any]: A = 20 A = model_class_name(A_ ) A = model.init_cache(input_ids.shape[0] ,A_ ) A = jnp.ones((input_ids.shape[0], max_decoder_length) ,dtype='i4' ) A = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] ,(input_ids.shape[0], input_ids.shape[-1] - 1) ) A = model( input_ids[:, :-1] ,attention_mask=A_ ,past_key_values=A_ ,position_ids=A_ ,) A = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] ,dtype='i4' ) A = model( input_ids[:, -1:] ,attention_mask=A_ ,past_key_values=outputs_cache.past_key_values ,position_ids=A_ ,) A = model(A_ ) A = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 ,msg=F'Max diff is {diff}' ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : int ,A_ : Union[str, Any] ,A_ : int ,A_ : Optional[Any] ) -> Optional[int]: A = 20 A = model_class_name(A_ ) A = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] ,axis=-1 ,) A = model.init_cache(input_ids.shape[0] ,A_ ) A = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] ,(input_ids.shape[0], input_ids.shape[-1] - 1) ) A = model( input_ids[:, :-1] ,attention_mask=A_ ,past_key_values=A_ ,position_ids=A_ ,) A = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] ,dtype='i4' ) A = model( input_ids[:, -1:] ,past_key_values=outputs_cache.past_key_values ,attention_mask=A_ ,position_ids=A_ ,) A = model(A_ ,attention_mask=A_ ) A = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 ,msg=F'Max diff is {diff}' ) @require_flax class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () _lowerCamelCase: Tuple = (FlaxGPTJForCausalLM,) if is_flax_available() else () def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: A = FlaxGPTJModelTester(self ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: for model_class_name in self.all_model_classes: A , A , A = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(A_ ,A_ ,A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]: for model_class_name in self.all_model_classes: A , A , A = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( A_ ,A_ ,A_ ,A_ ) @tooslow def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: A = GPTaTokenizer.from_pretrained('gpt2' ,pad_token='<|endoftext|>' ,padding_side='left' ) A = tokenizer(['Hello this is a long string', 'Hey'] ,return_tensors='np' ,padding=A_ ,truncation=A_ ) A = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' ) A = False A = model.config.eos_token_id A = jax.jit(model.generate ) A = jit_generate( inputs['input_ids'] ,attention_mask=inputs['attention_mask'] ,pad_token_id=tokenizer.pad_token_id ).sequences A = tokenizer.batch_decode(A_ ,skip_special_tokens=A_ ) A = [ 'Hello this is a long string of text.\n\nI\'m trying to get the text of the', 'Hey, I\'m a little late to the party. I\'m going to', ] self.assertListEqual(A_ ,A_ ) @is_pt_flax_cross_test def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]: A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs A = self._prepare_for_class(A_ ,A_ ) A = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class A = model_class.__name__[4:] # Skip the "Flax" at the beginning A = getattr(A_ ,A_ ) A , A = pt_inputs['input_ids'].shape A = np.random.randint(0 ,seq_length - 1 ,size=(batch_size,) ) for batch_idx, start_index in enumerate(A_ ): A = 0 A = 1 A = 0 A = 1 A = pt_model_class(A_ ).eval() A = model_class(A_ ,dtype=jnp.floataa ) A = convert_pytorch_state_dict_to_flax(pt_model.state_dict() ,A_ ) A = fx_state with torch.no_grad(): A = pt_model(**A_ ).to_tuple() A = fx_model(**A_ ).to_tuple() self.assertEqual(len(A_ ) ,len(A_ ) ,'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(A_ ,A_ ): self.assert_almost_equals(fx_output[:, -1] ,pt_output[:, -1].numpy() ,4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(A_ ) A = model_class.from_pretrained(A_ ,from_pt=A_ ) A = fx_model_loaded(**A_ ).to_tuple() self.assertEqual( len(A_ ) ,len(A_ ) ,'Output lengths differ between Flax and PyTorch' ) for fx_output_loaded, pt_output in zip(A_ ,A_ ): self.assert_almost_equals(fx_output_loaded[:, -1] ,pt_output[:, -1].numpy() ,4e-2 ) @is_pt_flax_cross_test def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs A = self._prepare_for_class(A_ ,A_ ) A = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class A = model_class.__name__[4:] # Skip the "Flax" at the beginning A = getattr(A_ ,A_ ) A = pt_model_class(A_ ).eval() A = model_class(A_ ,dtype=jnp.floataa ) A = load_flax_weights_in_pytorch_model(A_ ,fx_model.params ) A , A = pt_inputs['input_ids'].shape A = np.random.randint(0 ,seq_length - 1 ,size=(batch_size,) ) for batch_idx, start_index in enumerate(A_ ): A = 0 A = 1 A = 0 A = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): A = pt_model(**A_ ).to_tuple() A = fx_model(**A_ ).to_tuple() self.assertEqual(len(A_ ) ,len(A_ ) ,'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(A_ ,A_ ): self.assert_almost_equals(fx_output[:, -1] ,pt_output[:, -1].numpy() ,4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(A_ ) A = pt_model_class.from_pretrained(A_ ,from_flax=A_ ) with torch.no_grad(): A = pt_model_loaded(**A_ ).to_tuple() self.assertEqual( len(A_ ) ,len(A_ ) ,'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(A_ ,A_ ): self.assert_almost_equals(fx_output[:, -1] ,pt_output[:, -1].numpy() ,4e-2 ) @tooslow def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: for model_class_name in self.all_model_classes: A = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' ) A = model(np.ones((1, 1) ) ) self.assertIsNotNone(A_ )
91
"""simple docstring""" import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Tuple ,A_ : Any ,A_ : int=13 ,A_ : str=7 ,A_ : Tuple=True ,A_ : str=True ,A_ : str=False ,A_ : List[str]=True ,A_ : str=99 ,A_ : str=32 ,A_ : Optional[int]=5 ,A_ : Optional[Any]=4 ,A_ : str=37 ,A_ : Optional[Any]="gelu" ,A_ : Union[str, Any]=0.1 ,A_ : Any=0.1 ,A_ : Optional[Any]=512 ,A_ : str=16 ,A_ : int=2 ,A_ : Optional[Any]=0.02 ,A_ : str=3 ,A_ : str=4 ,A_ : List[str]=None ,) -> str: A = parent A = batch_size A = seq_length A = is_training A = use_input_mask A = use_token_type_ids A = use_labels A = vocab_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_vocab_size A = type_sequence_label_size A = initializer_range A = num_labels A = num_choices A = scope def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A = None if self.use_input_mask: A = random_attention_mask([self.batch_size, self.seq_length] ) A = None if self.use_token_type_ids: A = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) A = None A = None A = None if self.use_labels: A = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A = ids_tensor([self.batch_size] ,self.num_choices ) A = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: return LlamaConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=A_ ,initializer_range=self.initializer_range ,) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Dict ,A_ : Optional[int] ,A_ : Any ,A_ : Optional[Any] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Tuple ) -> List[Any]: A = LlamaModel(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ) A = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : int ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Any ,A_ : Tuple ,A_ : Union[str, Any] ,A_ : Dict ,) -> List[str]: A = True A = LlamaModel(A_ ) model.to(A_ ) model.eval() A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,) A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,) A = model(A_ ,attention_mask=A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : List[str] ,A_ : Optional[int] ,A_ : Any ,A_ : str ,A_ : Dict ,A_ : Dict ,A_ : Tuple ,A_ : Tuple ,A_ : Dict ,) -> Union[str, Any]: A = LlamaForCausalLM(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Dict ,A_ : Any ,A_ : int ,A_ : List[str] ,A_ : Tuple ,A_ : Any ,A_ : Union[str, Any] ,A_ : Any ,A_ : int ,) -> List[Any]: A = True A = True A = LlamaForCausalLM(config=A_ ) model.to(A_ ) model.eval() # first forward pass A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,use_cache=A_ ,) A = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A = ids_tensor((self.batch_size, 3) ,config.vocab_size ) A = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and A = torch.cat([input_ids, next_tokens] ,dim=-1 ) A = torch.cat([input_mask, next_mask] ,dim=-1 ) A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,output_hidden_states=A_ ,)['hidden_states'][0] A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,past_key_values=A_ ,output_hidden_states=A_ ,)['hidden_states'][0] # select random slice A = ids_tensor((1,) ,output_from_past.shape[-1] ).item() A = output_from_no_past[:, -3:, random_slice_idx].detach() A = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A_ ,A_ ,atol=1e-3 ) ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]: A = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = config_and_inputs A = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Dict = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () _lowerCamelCase: List[Any] = (LlamaForCausalLM,) if is_torch_available() else () _lowerCamelCase: Any = ( { '''feature-extraction''': LlamaModel, '''text-classification''': LlamaForSequenceClassification, '''text-generation''': LlamaForCausalLM, '''zero-shot''': LlamaForSequenceClassification, } if is_torch_available() else {} ) _lowerCamelCase: int = False _lowerCamelCase: List[str] = False def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: A = LlamaModelTester(self ) A = ConfigTester(self ,config_class=A_ ,hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]: A = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: A = type self.model_tester.create_and_check_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = 3 A = input_dict['input_ids'] A = input_ids.ne(1 ).to(A_ ) A = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) A = LlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = 3 A = 'single_label_classification' A = input_dict['input_ids'] A = input_ids.ne(1 ).to(A_ ) A = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) A = LlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = 3 A = 'multi_label_classification' A = input_dict['input_ids'] A = input_ids.ne(1 ).to(A_ ) A = ids_tensor( [self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float ) A = LlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('LLaMA buffers include complex numbers, which breaks this test' ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: pass @parameterized.expand([('linear',), ('dynamic',)] ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Any ) -> str: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = ids_tensor([1, 10] ,config.vocab_size ) A = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights A = LlamaModel(A_ ) original_model.to(A_ ) original_model.eval() A = original_model(A_ ).last_hidden_state A = original_model(A_ ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights A = {'type': scaling_type, 'factor': 10.0} A = LlamaModel(A_ ) scaled_model.to(A_ ) scaled_model.eval() A = scaled_model(A_ ).last_hidden_state A = scaled_model(A_ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(A_ ,A_ ,atol=1e-5 ) ) else: self.assertFalse(torch.allclose(A_ ,A_ ,atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(A_ ,A_ ,atol=1e-5 ) ) @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: A = [1, 306, 4658, 278, 6593, 310, 2834, 338] A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' ,device_map='auto' ) A = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 A = torch.tensor([[-6.65_50, -4.12_27, -4.98_59, -3.24_06, 0.82_62, -3.00_33, 1.29_64, -3.36_99]] ) torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A = torch.tensor([-12.82_81, -7.44_53, -0.46_39, -8.06_25, -7.25_00, -8.00_00, -6.48_83, -7.76_95, -7.84_38, -7.03_12, -6.21_88, -7.13_28, -1.84_96, 1.99_61, -8.62_50, -6.72_27, -12.82_81, -6.94_92, -7.07_42, -7.78_52, -7.58_20, -7.90_62, -6.93_75, -7.98_05, -8.34_38, -8.15_62, -8.04_69, -7.62_50, -7.74_22, -7.33_98,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,A_ ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def _SCREAMING_SNAKE_CASE ( self : Any ) -> int: A = [1, 306, 4658, 278, 6593, 310, 2834, 338] A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' ,device_map='auto' ) A = model(torch.tensor(A_ ) ) # Expected mean on dim = -1 A = torch.tensor([[-2.06_22, -1.27_94, -1.16_38, -0.97_88, -1.46_03, -1.02_38, -1.78_93, -1.44_11]] ) torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A = torch.tensor([-8.14_06, -8.05_47, 2.74_61, -1.23_44, -0.14_48, -1.82_62, -1.00_20, -1.81_54, -1.68_95, -1.85_16, -2.35_74, -0.92_77, 3.75_98, 6.57_42, -1.29_98, -0.11_77, -8.14_06, -2.96_88, -2.91_99, -3.16_99, -3.52_54, -2.35_55, -2.79_88, -3.41_41, -2.82_62, -4.51_95, -3.33_79, -3.31_64, -2.78_32, -3.02_73] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,A_ ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def _SCREAMING_SNAKE_CASE ( self : int ) -> str: A = [1, 306, 4658, 278, 6593, 310, 2834, 338] A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ,device_map='auto' ) A = model(torch.tensor(A_ ) ) # Expected mean on dim = -1 A = torch.tensor([[-0.85_62, -1.85_20, -0.75_51, -0.41_62, -1.51_61, -1.20_38, -2.48_23, -2.32_54]] ) torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A = torch.tensor([-2.22_27, 4.88_28, 0.90_23, -0.45_78, -0.78_71, -0.10_33, -0.62_21, -0.57_86, -0.78_03, -1.06_74, -1.29_20, -0.15_70, 0.80_08, 2.07_23, -0.94_97, 0.27_71, -2.22_27, -0.76_12, -1.43_46, -1.20_61, -1.64_26, -0.30_00, -0.71_39, -1.19_34, -1.86_91, -1.69_73, -1.59_47, -1.27_05, -0.35_23, -0.55_13] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) @unittest.skip( 'Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test' ) @slow def _SCREAMING_SNAKE_CASE ( self : Any ) -> str: A = [1, 306, 4658, 278, 6593, 310, 2834, 338] A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' ,device_map='auto' ) A = model(torch.tensor(A_ ) ) A = torch.tensor( [[-4.23_27, -3.33_60, -4.66_65, -4.76_31, -1.81_80, -3.41_70, -1.42_11, -3.18_10]] ,dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) # fmt: off A = torch.tensor([-9.49_22, -3.95_51, 1.79_98, -5.67_58, -5.10_55, -5.89_84, -4.83_20, -6.80_86, -6.53_91, -5.61_72, -5.58_20, -5.53_52, 1.78_81, 3.62_89, -6.51_17, -3.47_85, -9.50_00, -6.03_52, -6.81_25, -6.01_95, -6.68_36, -5.47_27, -6.28_12, -6.03_91, -7.33_98, -7.42_97, -7.48_44, -6.58_20, -5.87_89, -5.53_12] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,A_ ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip('Model is curently gated' ) @slow def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple: A = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi' A = 'Simply put, the theory of relativity states that ' A = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ) A = tokenizer.encode(A_ ,return_tensors='pt' ) A = LlamaForCausalLM.from_pretrained( 'meta-llama/Llama-2-13b-chat-hf' ,device_map='sequential' ,use_safetensors=A_ ) # greedy generation outputs A = model.generate(A_ ,max_new_tokens=64 ,top_p=A_ ,temperature=1 ,do_sample=A_ ) A = tokenizer.decode(generated_ids[0] ,skip_special_tokens=A_ ) self.assertEqual(A_ ,A_ )
91
1
"""simple docstring""" def _snake_case ( snake_case__ : list , snake_case__ : list , snake_case__ : int ): if len(snake_case__ ) != len(snake_case__ ): raise ValueError('The length of profit and weight must be same.' ) if max_weight <= 0: raise ValueError('max_weight must greater than zero.' ) if any(p < 0 for p in profit ): raise ValueError('Profit can not be negative.' ) if any(w < 0 for w in weight ): raise ValueError('Weight can not be negative.' ) # List created to store profit gained for the 1kg in case of each weight # respectively. Calculate and append profit/weight for each element. A = [p / w for p, w in zip(snake_case__ , snake_case__ )] # Creating a copy of the list and sorting profit/weight in ascending order A = sorted(snake_case__ ) # declaring useful variables A = len(snake_case__ ) A = 0 A = 0 A = 0 # loop till the total weight do not reach max limit e.g. 15 kg and till i<length while limit <= max_weight and i < length: # flag value for encountered greatest element in sorted_profit_by_weight A = sorted_profit_by_weight[length - i - 1] A = profit_by_weight.index(snake_case__ ) A = -1 # check if the weight encountered is less than the total weight # encountered before. if max_weight - limit >= weight[index]: limit += weight[index] # Adding profit gained for the given weight 1 === # weight[index]/weight[index] gain += 1 * profit[index] else: # Since the weight encountered is greater than limit, therefore take the # required number of remaining kgs and calculate profit for it. # weight remaining / weight[index] gain += (max_weight - limit) / weight[index] * profit[index] break i += 1 return gain if __name__ == "__main__": print( '''Input profits, weights, and then max_weight (all positive ints) separated by ''' '''spaces.''' ) _lowercase = [int(x) for x in input('''Input profits separated by spaces: ''').split()] _lowercase = [int(x) for x in input('''Input weights separated by spaces: ''').split()] _lowercase = int(input('''Max weight allowed: ''')) # Function Call calc_profit(profit, weight, max_weight)
91
"""simple docstring""" import os # Precomputes a list of the 100 first triangular numbers _lowercase = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)] def _snake_case ( ): A = os.path.dirname(os.path.realpath(snake_case__ ) ) A = os.path.join(snake_case__ , 'words.txt' ) A = '' with open(snake_case__ ) as f: A = f.readline() A = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )] A = [ word for word in [sum(ord(snake_case__ ) - 64 for x in word ) for word in words] if word in TRIANGULAR_NUMBERS ] return len(snake_case__ ) if __name__ == "__main__": print(solution())
91
1
"""simple docstring""" class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Union[str, Any] ,A_ : Union[str, Any] ,A_ : List[Any] ) -> Union[str, Any]: A = name A = val def __str__( self : Dict ) -> Tuple: return F'{self.__class__.__name__}({self.name}, {self.val})' def __lt__( self : Union[str, Any] ,A_ : List[str] ) -> str: return self.val < other.val class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[int] ,A_ : Union[str, Any] ) -> List[str]: A = {} A = {} A = self.build_heap(A_ ) def __getitem__( self : str ,A_ : Dict ) -> Tuple: return self.get_value(A_ ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Optional[int] ) -> List[Any]: return (idx - 1) // 2 def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Tuple ) -> Union[str, Any]: return idx * 2 + 1 def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Any ) -> Tuple: return idx * 2 + 2 def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Tuple ) -> Any: return self.heap_dict[key] def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Any ) -> int: A = len(A_ ) - 1 A = self.get_parent_idx(A_ ) for idx, i in enumerate(A_ ): A = idx A = i.val for i in range(A_ ,-1 ,-1 ): self.sift_down(A_ ,A_ ) return array def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Union[str, Any] ,A_ : Tuple ) -> int: while True: A = self.get_left_child_idx(A_ ) # noqa: E741 A = self.get_right_child_idx(A_ ) A = idx if l < len(A_ ) and array[l] < array[idx]: A = l if r < len(A_ ) and array[r] < array[smallest]: A = r if smallest != idx: A , A = array[smallest], array[idx] ( ( A ) , ( A ) , ) = ( self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]], ) A = smallest else: break def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Any ) -> Tuple: A = self.get_parent_idx(A_ ) while p >= 0 and self.heap[p] > self.heap[idx]: A , A = self.heap[idx], self.heap[p] A , A = ( self.idx_of_element[self.heap[idx]], self.idx_of_element[self.heap[p]], ) A = p A = self.get_parent_idx(A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: return self.heap[0] def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: A , A = self.heap[-1], self.heap[0] A , A = ( self.idx_of_element[self.heap[-1]], self.idx_of_element[self.heap[0]], ) A = self.heap.pop() del self.idx_of_element[x] self.sift_down(0 ,self.heap ) return x def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Optional[int] ) -> Optional[int]: self.heap.append(A_ ) A = len(self.heap ) - 1 A = node.val self.sift_up(len(self.heap ) - 1 ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]: return len(self.heap ) == 0 def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Union[str, Any] ,A_ : Optional[int] ) -> List[Any]: assert ( self.heap[self.idx_of_element[node]].val > new_value ), "newValue must be less that current value" A = new_value A = new_value self.sift_up(self.idx_of_element[node] ) _lowercase = Node('''R''', -1) _lowercase = Node('''B''', 6) _lowercase = Node('''A''', 3) _lowercase = Node('''X''', 1) _lowercase = Node('''E''', 4) # Use one of these two ways to generate Min-Heap # Generating Min-Heap from array _lowercase = MinHeap([r, b, a, x, e]) # Generating Min-Heap by Insert method # myMinHeap.insert(a) # myMinHeap.insert(b) # myMinHeap.insert(x) # myMinHeap.insert(r) # myMinHeap.insert(e) # Before print('''Min Heap - before decrease key''') for i in my_min_heap.heap: print(i) print('''Min Heap - After decrease key of node [B -> -17]''') my_min_heap.decrease_key(b, -17) # After for i in my_min_heap.heap: print(i) if __name__ == "__main__": import doctest doctest.testmod()
91
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''', '''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = '''mobilenet_v1''' def __init__( self : Optional[int] ,A_ : Optional[int]=3 ,A_ : Any=224 ,A_ : List[Any]=1.0 ,A_ : Union[str, Any]=8 ,A_ : Union[str, Any]="relu6" ,A_ : Optional[Any]=True ,A_ : List[str]=0.9_99 ,A_ : int=0.02 ,A_ : int=0.0_01 ,**A_ : Union[str, Any] ,) -> Dict: super().__init__(**A_ ) if depth_multiplier <= 0: raise ValueError('depth_multiplier must be greater than zero.' ) A = num_channels A = image_size A = depth_multiplier A = min_depth A = hidden_act A = tf_padding A = classifier_dropout_prob A = initializer_range A = layer_norm_eps class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[int] = version.parse('''1.11''' ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict([('pixel_values', {0: 'batch'})] ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: if self.task == "image-classification": return OrderedDict([('logits', {0: 'batch'})] ) else: return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> float: return 1e-4
91
1
"""simple docstring""" import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel _lowercase = { '''text_branch''': '''text_model''', '''audio_branch''': '''audio_model.audio_encoder''', '''attn''': '''attention.self''', '''self.proj''': '''output.dense''', '''attention.self_mask''': '''attn_mask''', '''mlp.fc1''': '''intermediate.dense''', '''mlp.fc2''': '''output.dense''', '''norm1''': '''layernorm_before''', '''norm2''': '''layernorm_after''', '''bn0''': '''batch_norm''', } _lowercase = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''') def _snake_case ( snake_case__ : List[str] , snake_case__ : Dict=False ): A , A = create_model( 'HTSAT-tiny' , 'roberta' , snake_case__ , precision='fp32' , device='cuda:0' if torch.cuda.is_available() else 'cpu' , enable_fusion=snake_case__ , fusion_type='aff_2d' if enable_fusion else None , ) return model, model_cfg def _snake_case ( snake_case__ : Dict ): A = {} A = r'.*sequential.(\d+).*' A = r'.*_projection.(\d+).*' for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: A = key.replace(snake_case__ , snake_case__ ) if re.match(snake_case__ , snake_case__ ): # replace sequential layers with list A = re.match(snake_case__ , snake_case__ ).group(1 ) A = key.replace(F'sequential.{sequential_layer}.' , F'layers.{int(snake_case__ )//3}.linear.' ) elif re.match(snake_case__ , snake_case__ ): A = int(re.match(snake_case__ , snake_case__ ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... A = 1 if projecton_layer == 0 else 2 A = key.replace(F'_projection.{projecton_layer}.' , F'_projection.linear{transformers_projection_layer}.' ) if "audio" and "qkv" in key: # split qkv into query key and value A = value A = mixed_qkv.size(0 ) // 3 A = mixed_qkv[:qkv_dim] A = mixed_qkv[qkv_dim : qkv_dim * 2] A = mixed_qkv[qkv_dim * 2 :] A = query_layer A = key_layer A = value_layer else: A = value return model_state_dict def _snake_case ( snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : str=False ): A , A = init_clap(snake_case__ , enable_fusion=snake_case__ ) clap_model.eval() A = clap_model.state_dict() A = rename_state_dict(snake_case__ ) A = ClapConfig() A = enable_fusion A = ClapModel(snake_case__ ) # ignore the spectrogram embedding layer model.load_state_dict(snake_case__ , strict=snake_case__ ) model.save_pretrained(snake_case__ ) transformers_config.save_pretrained(snake_case__ ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''') _lowercase = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
91
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowercase = { '''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''], '''tokenization_electra''': ['''ElectraTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''ElectraTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ElectraForCausalLM''', '''ElectraForMaskedLM''', '''ElectraForMultipleChoice''', '''ElectraForPreTraining''', '''ElectraForQuestionAnswering''', '''ElectraForSequenceClassification''', '''ElectraForTokenClassification''', '''ElectraModel''', '''ElectraPreTrainedModel''', '''load_tf_weights_in_electra''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFElectraForMaskedLM''', '''TFElectraForMultipleChoice''', '''TFElectraForPreTraining''', '''TFElectraForQuestionAnswering''', '''TFElectraForSequenceClassification''', '''TFElectraForTokenClassification''', '''TFElectraModel''', '''TFElectraPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''FlaxElectraForCausalLM''', '''FlaxElectraForMaskedLM''', '''FlaxElectraForMultipleChoice''', '''FlaxElectraForPreTraining''', '''FlaxElectraForQuestionAnswering''', '''FlaxElectraForSequenceClassification''', '''FlaxElectraForTokenClassification''', '''FlaxElectraModel''', '''FlaxElectraPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
91
1
"""simple docstring""" def _snake_case ( snake_case__ : str , snake_case__ : str = " " ): A = [] A = 0 for index, char in enumerate(snake_case__ ): if char == separator: split_words.append(string[last_index:index] ) A = index + 1 elif index + 1 == len(snake_case__ ): split_words.append(string[last_index : index + 1] ) return split_words if __name__ == "__main__": from doctest import testmod testmod()
91
"""simple docstring""" import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal _lowercase = datasets.utils.logging.get_logger(__name__) _lowercase = ['''names''', '''prefix'''] _lowercase = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols'''] _lowercase = ['''encoding_errors''', '''on_bad_lines'''] _lowercase = ['''date_format'''] @dataclass class lowerCAmelCase_ ( datasets.BuilderConfig ): '''simple docstring''' _lowerCamelCase: str = "," _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[Union[int, List[int], str]] = "infer" _lowerCamelCase: Optional[List[str]] = None _lowerCamelCase: Optional[List[str]] = None _lowerCamelCase: Optional[Union[int, str, List[int], List[str]]] = None _lowerCamelCase: Optional[Union[List[int], List[str]]] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: bool = True _lowerCamelCase: Optional[Literal["c", "python", "pyarrow"]] = None _lowerCamelCase: Dict[Union[int, str], Callable[[Any], Any]] = None _lowerCamelCase: Optional[list] = None _lowerCamelCase: Optional[list] = None _lowerCamelCase: bool = False _lowerCamelCase: Optional[Union[int, List[int]]] = None _lowerCamelCase: Optional[int] = None _lowerCamelCase: Optional[Union[str, List[str]]] = None _lowerCamelCase: bool = True _lowerCamelCase: bool = True _lowerCamelCase: bool = False _lowerCamelCase: bool = True _lowerCamelCase: Optional[str] = None _lowerCamelCase: str = "." _lowerCamelCase: Optional[str] = None _lowerCamelCase: str = '"' _lowerCamelCase: int = 0 _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: bool = True _lowerCamelCase: bool = True _lowerCamelCase: int = 0 _lowerCamelCase: bool = True _lowerCamelCase: bool = False _lowerCamelCase: Optional[str] = None _lowerCamelCase: int = 10000 _lowerCamelCase: Optional[datasets.Features] = None _lowerCamelCase: Optional[str] = "strict" _lowerCamelCase: Literal["error", "warn", "skip"] = "error" _lowerCamelCase: Optional[str] = None def _SCREAMING_SNAKE_CASE ( self : str ) -> Any: if self.delimiter is not None: A = self.delimiter if self.column_names is not None: A = self.column_names @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: A = { 'sep': self.sep, 'header': self.header, 'names': self.names, 'index_col': self.index_col, 'usecols': self.usecols, 'prefix': self.prefix, 'mangle_dupe_cols': self.mangle_dupe_cols, 'engine': self.engine, 'converters': self.converters, 'true_values': self.true_values, 'false_values': self.false_values, 'skipinitialspace': self.skipinitialspace, 'skiprows': self.skiprows, 'nrows': self.nrows, 'na_values': self.na_values, 'keep_default_na': self.keep_default_na, 'na_filter': self.na_filter, 'verbose': self.verbose, 'skip_blank_lines': self.skip_blank_lines, 'thousands': self.thousands, 'decimal': self.decimal, 'lineterminator': self.lineterminator, 'quotechar': self.quotechar, 'quoting': self.quoting, 'escapechar': self.escapechar, 'comment': self.comment, 'encoding': self.encoding, 'dialect': self.dialect, 'error_bad_lines': self.error_bad_lines, 'warn_bad_lines': self.warn_bad_lines, 'skipfooter': self.skipfooter, 'doublequote': self.doublequote, 'memory_map': self.memory_map, 'float_precision': self.float_precision, 'chunksize': self.chunksize, 'encoding_errors': self.encoding_errors, 'on_bad_lines': self.on_bad_lines, 'date_format': self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() ,A_ ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class lowerCAmelCase_ ( datasets.ArrowBasedBuilder ): '''simple docstring''' _lowerCamelCase: Any = CsvConfig def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: return datasets.DatasetInfo(features=self.config.features ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Any ) -> str: if not self.config.data_files: raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' ) A = dl_manager.download_and_extract(self.config.data_files ) if isinstance(A_ ,(str, list, tuple) ): A = data_files if isinstance(A_ ,A_ ): A = [files] A = [dl_manager.iter_files(A_ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={'files': files} )] A = [] for split_name, files in data_files.items(): if isinstance(A_ ,A_ ): A = [files] A = [dl_manager.iter_files(A_ ) for file in files] splits.append(datasets.SplitGenerator(name=A_ ,gen_kwargs={'files': files} ) ) return splits def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : pa.Table ) -> pa.Table: if self.config.features is not None: A = self.config.features.arrow_schema if all(not require_storage_cast(A_ ) for feature in self.config.features.values() ): # cheaper cast A = pa.Table.from_arrays([pa_table[field.name] for field in schema] ,schema=A_ ) else: # more expensive cast; allows str <-> int/float or str to Audio for example A = table_cast(A_ ,A_ ) return pa_table def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[str, Any] ) -> List[Any]: A = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str A = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(A_ ) else object for name, dtype, feature in zip(schema.names ,schema.types ,self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(A_ ) ): A = pd.read_csv(A_ ,iterator=A_ ,dtype=A_ ,**self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(A_ ): A = pa.Table.from_pandas(A_ ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(A_ ) except ValueError as e: logger.error(F'Failed to read file \'{file}\' with error {type(A_ )}: {e}' ) raise
91
1
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''BAAI/AltCLIP''': '''https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json''', # See all AltCLIP models at https://huggingface.co/models?filter=altclip } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[int] = '''altclip_text_model''' def __init__( self : Tuple ,A_ : List[str]=25_0002 ,A_ : Tuple=1024 ,A_ : Dict=24 ,A_ : Union[str, Any]=16 ,A_ : str=4096 ,A_ : str="gelu" ,A_ : List[str]=0.1 ,A_ : Optional[int]=0.1 ,A_ : Tuple=514 ,A_ : int=1 ,A_ : Union[str, Any]=0.02 ,A_ : Dict=0.02 ,A_ : Dict=1e-05 ,A_ : int=1 ,A_ : List[Any]=0 ,A_ : Optional[Any]=2 ,A_ : int="absolute" ,A_ : Optional[int]=True ,A_ : Dict=768 ,**A_ : Tuple ,) -> str: super().__init__(pad_token_id=A_ ,bos_token_id=A_ ,eos_token_id=A_ ,**A_ ) A = vocab_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = hidden_act A = intermediate_size A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_vocab_size A = initializer_range A = initializer_factor A = layer_norm_eps A = position_embedding_type A = use_cache A = project_dim class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Union[str, Any] = '''altclip_vision_model''' def __init__( self : int ,A_ : Any=768 ,A_ : List[str]=3072 ,A_ : str=512 ,A_ : Optional[Any]=12 ,A_ : str=12 ,A_ : str=3 ,A_ : Dict=224 ,A_ : Optional[int]=32 ,A_ : Optional[Any]="quick_gelu" ,A_ : int=1e-5 ,A_ : Union[str, Any]=0.0 ,A_ : Union[str, Any]=0.02 ,A_ : int=1.0 ,**A_ : List[Any] ,) -> Optional[int]: super().__init__(**A_ ) A = hidden_size A = intermediate_size A = projection_dim A = num_hidden_layers A = num_attention_heads A = num_channels A = patch_size A = image_size A = initializer_range A = initializer_factor A = attention_dropout A = layer_norm_eps A = hidden_act @classmethod def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] ,A_ : Union[str, os.PathLike] ,**A_ : str ) -> "PretrainedConfig": cls._set_token_in_kwargs(A_ ) A , A = cls.get_config_dict(A_ ,**A_ ) # get the vision config dict if we are loading from AltCLIPConfig if config_dict.get('model_type' ) == "altclip": A = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(A_ ,**A_ ) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Any = '''altclip''' _lowerCamelCase: List[str] = True def __init__( self : Optional[Any] ,A_ : str=None ,A_ : Tuple=None ,A_ : str=768 ,A_ : Any=2.65_92 ,**A_ : int ) -> Optional[int]: # If `_config_dict` exist, we use them for the backward compatibility. # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot # of confusion!). A = kwargs.pop('text_config_dict' ,A_ ) A = kwargs.pop('vision_config_dict' ,A_ ) super().__init__(**A_ ) # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`. if text_config_dict is not None: if text_config is None: A = {} # This is the complete result when using `text_config_dict`. A = AltCLIPTextConfig(**A_ ).to_dict() # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different. for key, value in _text_config_dict.items(): if key in text_config and value != text_config[key] and key not in ["transformers_version"]: # If specified in `text_config_dict` if key in text_config_dict: A = ( F'`{key}` is found in both `text_config_dict` and `text_config` but with different values. ' F'The value `text_config_dict["{key}"]` will be used instead.' ) # If inferred from default argument values (just to be super careful) else: A = ( F'`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The ' F'value `text_config["{key}"]` will be overriden.' ) logger.warning(A_ ) # Update all values in `text_config` with the ones in `_text_config_dict`. text_config.update(_text_config_dict ) if vision_config_dict is not None: if vision_config is None: A = {} # This is the complete result when using `vision_config_dict`. A = AltCLIPVisionConfig(**A_ ).to_dict() # convert keys to string instead of integer if "id2label" in _vision_config_dict: A = { str(A_ ): value for key, value in _vision_config_dict['id2label'].items() } # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different. for key, value in _vision_config_dict.items(): if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]: # If specified in `vision_config_dict` if key in vision_config_dict: A = ( F'`{key}` is found in both `vision_config_dict` and `vision_config` but with different ' F'values. The value `vision_config_dict["{key}"]` will be used instead.' ) # If inferred from default argument values (just to be super careful) else: A = ( F'`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. ' F'The value `vision_config["{key}"]` will be overriden.' ) logger.warning(A_ ) # Update all values in `vision_config` with the ones in `_vision_config_dict`. vision_config.update(_vision_config_dict ) if text_config is None: A = {} logger.info('`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.' ) if vision_config is None: A = {} logger.info('`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.' ) A = AltCLIPTextConfig(**A_ ) A = AltCLIPVisionConfig(**A_ ) A = projection_dim A = logit_scale_init_value A = 1.0 @classmethod def _SCREAMING_SNAKE_CASE ( cls : Tuple ,A_ : AltCLIPTextConfig ,A_ : AltCLIPVisionConfig ,**A_ : List[Any] ) -> Dict: return cls(text_config=text_config.to_dict() ,vision_config=vision_config.to_dict() ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: A = copy.deepcopy(self.__dict__ ) A = self.text_config.to_dict() A = self.vision_config.to_dict() A = self.__class__.model_type return output
91
"""simple docstring""" from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : Any ,A_ : Callable ,A_ : Optional[Features] = None ,A_ : str = None ,A_ : bool = False ,A_ : bool = False ,A_ : Optional[dict] = None ,A_ : Optional[int] = None ,**A_ : int ,) -> str: super().__init__( features=A_ ,cache_dir=A_ ,keep_in_memory=A_ ,streaming=A_ ,num_proc=A_ ,**A_ ,) A = Generator( cache_dir=A_ ,features=A_ ,generator=A_ ,gen_kwargs=A_ ,**A_ ,) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: # Build iterable dataset if self.streaming: A = self.builder.as_streaming_dataset(split='train' ) # Build regular (map-style) dataset else: A = None A = None A = None A = None self.builder.download_and_prepare( download_config=A_ ,download_mode=A_ ,verification_mode=A_ ,base_path=A_ ,num_proc=self.num_proc ,) A = self.builder.as_dataset( split='train' ,verification_mode=A_ ,in_memory=self.keep_in_memory ) return dataset
91
1
"""simple docstring""" import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / '''utils''')) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: # A mock response for an HTTP head request to emulate server down A = mock.Mock() A = 500 A = {} A = HTTPError A = {} # Download this model to make sure it's in the cache. A = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('requests.Session.request' ,return_value=A_ ) as mock_head: A = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' ) # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: # A mock response for an HTTP head request to emulate server down A = mock.Mock() A = 500 A = {} A = HTTPError A = {} # Download this model to make sure it's in the cache. A = GPTaTokenizerFast.from_pretrained('gpt2' ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('requests.Session.request' ,return_value=A_ ) as mock_head: A = GPTaTokenizerFast.from_pretrained('gpt2' ) # This check we did call the fake head request mock_head.assert_called() def _SCREAMING_SNAKE_CASE ( self : Any ) -> int: # This test is for deprecated behavior and can be removed in v5 try: A = tempfile.mktemp() with open(A_ ,'wb' ) as f: http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' ,A_ ) A = AlbertTokenizer.from_pretrained(A_ ) finally: os.remove(A_ ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile('tokenizer.json' ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. return try: with open('tokenizer.json' ,'wb' ) as f: http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' ,A_ ) A = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size ,1000 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. finally: os.remove('tokenizer.json' ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: # This test is for deprecated behavior and can be removed in v5 A = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' ) @is_staging_test class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] @classmethod def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] ) -> Optional[Any]: A = TOKEN HfFolder.save_token(A_ ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : str ) -> List[Any]: try: delete_repo(token=cls._token ,repo_id='test-tokenizer' ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id='valid_org/test-tokenizer-org' ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id='test-dynamic-tokenizer' ) except HTTPError: pass def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: with tempfile.TemporaryDirectory() as tmp_dir: A = os.path.join(A_ ,'vocab.txt' ) with open(A_ ,'w' ,encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) A = BertTokenizer(A_ ) tokenizer.push_to_hub('test-tokenizer' ,use_auth_token=self._token ) A = BertTokenizer.from_pretrained(F'{USER}/test-tokenizer' ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) # Reset repo delete_repo(token=self._token ,repo_id='test-tokenizer' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(A_ ,repo_id='test-tokenizer' ,push_to_hub=A_ ,use_auth_token=self._token ) A = BertTokenizer.from_pretrained(F'{USER}/test-tokenizer' ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple: with tempfile.TemporaryDirectory() as tmp_dir: A = os.path.join(A_ ,'vocab.txt' ) with open(A_ ,'w' ,encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) A = BertTokenizer(A_ ) tokenizer.push_to_hub('valid_org/test-tokenizer-org' ,use_auth_token=self._token ) A = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) # Reset repo delete_repo(token=self._token ,repo_id='valid_org/test-tokenizer-org' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( A_ ,repo_id='valid_org/test-tokenizer-org' ,push_to_hub=A_ ,use_auth_token=self._token ) A = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) @require_tokenizers def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]: CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: A = os.path.join(A_ ,'vocab.txt' ) with open(A_ ,'w' ,encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) A = CustomTokenizer(A_ ) # No fast custom tokenizer tokenizer.push_to_hub('test-dynamic-tokenizer' ,use_auth_token=self._token ) A = AutoTokenizer.from_pretrained(F'{USER}/test-dynamic-tokenizer' ,trust_remote_code=A_ ) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizer' ) # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: A = os.path.join(A_ ,'vocab.txt' ) with open(A_ ,'w' ,encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) A = BertTokenizerFast.from_pretrained(A_ ) bert_tokenizer.save_pretrained(A_ ) A = CustomTokenizerFast.from_pretrained(A_ ) tokenizer.push_to_hub('test-dynamic-tokenizer' ,use_auth_token=self._token ) A = AutoTokenizer.from_pretrained(F'{USER}/test-dynamic-tokenizer' ,trust_remote_code=A_ ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizerFast' ) A = AutoTokenizer.from_pretrained( F'{USER}/test-dynamic-tokenizer' ,use_fast=A_ ,trust_remote_code=A_ ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizer' ) class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict: A = Trie() trie.add('Hello 友達' ) self.assertEqual(trie.data ,{'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} ) trie.add('Hello' ) trie.data self.assertEqual(trie.data ,{'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]: A = Trie() self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) ,['[CLS] This is a extra_id_100'] ) trie.add('[CLS]' ) trie.add('extra_id_1' ) trie.add('extra_id_100' ) self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) ,['[CLS]', ' This is a ', 'extra_id_100'] ) def _SCREAMING_SNAKE_CASE ( self : str ) -> List[str]: A = Trie() trie.add('A' ) self.assertEqual(trie.split('ABC' ) ,['A', 'BC'] ) self.assertEqual(trie.split('BCA' ) ,['BC', 'A'] ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]: A = Trie() trie.add('TOKEN]' ) trie.add('[SPECIAL_TOKEN]' ) self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) ,['This is something ', '[SPECIAL_TOKEN]'] ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple: A = Trie() trie.add('A' ) trie.add('P' ) trie.add('[SPECIAL_TOKEN]' ) self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) ,['This is something ', '[SPECIAL_TOKEN]'] ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int: A = Trie() trie.add('AB' ) trie.add('B' ) trie.add('C' ) self.assertEqual(trie.split('ABC' ) ,['AB', 'C'] ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: A = Trie() trie.add('ABC' ) trie.add('B' ) trie.add('CD' ) self.assertEqual(trie.split('ABCD' ) ,['ABC', 'D'] ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]: # Even if the offsets are wrong, we necessarily output correct string # parts. A = Trie() A = trie.cut_text('ABC' ,[0, 0, 2, 1, 2, 3] ) self.assertEqual(A_ ,['AB', 'C'] )
91
"""simple docstring""" from maths.prime_check import is_prime def _snake_case ( snake_case__ : int ): if not isinstance(snake_case__ , snake_case__ ): A = F'Input value of [number={number}] must be an integer' raise TypeError(snake_case__ ) if is_prime(snake_case__ ) and is_prime(number + 2 ): return number + 2 else: return -1 if __name__ == "__main__": import doctest doctest.testmod()
91
1
"""simple docstring""" class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[str] ,A_ : list ) -> None: A = set_counts A = max(A_ ) A = len(A_ ) A = [1] * num_sets A = list(range(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ,A_ : int ) -> bool: A = self.get_parent(A_ ) A = self.get_parent(A_ ) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] A = 0 A = dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 A = self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] A = 0 A = src_parent A = self.set_counts[src_parent] A = max(self.max_set ,A_ ) return True def _SCREAMING_SNAKE_CASE ( self : int ,A_ : int ) -> int: if self.parents[disj_set] == disj_set: return disj_set A = self.get_parent(self.parents[disj_set] ) return self.parents[disj_set]
91
"""simple docstring""" import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Dict = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : List[str]=0 ) -> str: A = floats_tensor((1, 3, 128, 128) ,rng=random.Random(A_ ) ) A = np.random.RandomState(A_ ) A = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'strength': 0.75, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) A = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=A_ ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) # warmup pass to apply optimizations A = pipe(**self.get_dummy_inputs() ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: A = ort.SessionOptions() A = False return options def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) A = init_image.resize((768, 512) ) # using the PNDM scheduler by default A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' ,revision='onnx' ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=A_ ) A = 'A fantasy landscape, trending on artstation' A = np.random.RandomState(0 ) A = pipe( prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=A_ ,output_type='np' ,) A = output.images A = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) A = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) A = init_image.resize((768, 512) ) A = LMSDiscreteScheduler.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,subfolder='scheduler' ,revision='onnx' ) A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,scheduler=A_ ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=A_ ) A = 'A fantasy landscape, trending on artstation' A = np.random.RandomState(0 ) A = pipe( prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=A_ ,output_type='np' ,) A = output.images A = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) A = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
91
1
"""simple docstring""" import argparse from collections import defaultdict import yaml _lowercase = '''docs/source/en/_toctree.yml''' def _snake_case ( snake_case__ : str ): A = defaultdict(snake_case__ ) A = [] A = [] for doc in doc_list: if "local" in doc: counts[doc["local"]] += 1 if doc["title"].lower() == "overview": overview_doc.append({'local': doc['local'], 'title': doc['title']} ) else: new_doc_list.append(snake_case__ ) A = new_doc_list A = [key for key, value in counts.items() if value > 1] A = [] for duplicate_key in duplicates: A = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} ) if len(snake_case__ ) > 1: raise ValueError( F'{duplicate_key} is present several times in the documentation table of content at ' '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the ' 'others.' ) # Only add this once new_doc.append({'local': duplicate_key, 'title': titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] ) A = sorted(snake_case__ , key=lambda snake_case__ : s["title"].lower() ) # "overview" gets special treatment and is always first if len(snake_case__ ) > 1: raise ValueError('{doc_list} has two \'overview\' docs which is not allowed.' ) overview_doc.extend(snake_case__ ) # Sort return overview_doc def _snake_case ( snake_case__ : Optional[int]=False ): with open(snake_case__ , encoding='utf-8' ) as f: A = yaml.safe_load(f.read() ) # Get to the API doc A = 0 while content[api_idx]["title"] != "API": api_idx += 1 A = content[api_idx]['sections'] # Then to the model doc A = 0 while api_doc[scheduler_idx]["title"] != "Schedulers": scheduler_idx += 1 A = api_doc[scheduler_idx]['sections'] A = clean_doc_toc(snake_case__ ) A = False if new_scheduler_doc != scheduler_doc: A = True if overwrite: A = new_scheduler_doc if diff: if overwrite: A = api_doc with open(snake_case__ , 'w' , encoding='utf-8' ) as f: f.write(yaml.dump(snake_case__ , allow_unicode=snake_case__ ) ) else: raise ValueError( 'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' ) def _snake_case ( snake_case__ : Dict=False ): with open(snake_case__ , encoding='utf-8' ) as f: A = yaml.safe_load(f.read() ) # Get to the API doc A = 0 while content[api_idx]["title"] != "API": api_idx += 1 A = content[api_idx]['sections'] # Then to the model doc A = 0 while api_doc[pipeline_idx]["title"] != "Pipelines": pipeline_idx += 1 A = False A = api_doc[pipeline_idx]['sections'] A = [] # sort sub pipeline docs for pipeline_doc in pipeline_docs: if "section" in pipeline_doc: A = pipeline_doc['section'] A = clean_doc_toc(snake_case__ ) if overwrite: A = new_sub_pipeline_doc new_pipeline_docs.append(snake_case__ ) # sort overall pipeline doc A = clean_doc_toc(snake_case__ ) if new_pipeline_docs != pipeline_docs: A = True if overwrite: A = new_pipeline_docs if diff: if overwrite: A = api_doc with open(snake_case__ , 'w' , encoding='utf-8' ) as f: f.write(yaml.dump(snake_case__ , allow_unicode=snake_case__ ) ) else: raise ValueError( 'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') _lowercase = parser.parse_args() check_scheduler_doc(args.fix_and_overwrite) check_pipeline_doc(args.fix_and_overwrite)
91
"""simple docstring""" from __future__ import annotations def _snake_case ( snake_case__ : tuple[int, int] , snake_case__ : int ): A , A = position A = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] A = [] for position in positions: A , A = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(snake_case__ ) return permissible_positions def _snake_case ( snake_case__ : list[list[int]] ): return not any(elem == 0 for row in board for elem in row ) def _snake_case ( snake_case__ : list[list[int]] , snake_case__ : tuple[int, int] , snake_case__ : int ): if is_complete(snake_case__ ): return True for position in get_valid_pos(snake_case__ , len(snake_case__ ) ): A , A = position if board[y][x] == 0: A = curr + 1 if open_knight_tour_helper(snake_case__ , snake_case__ , curr + 1 ): return True A = 0 return False def _snake_case ( snake_case__ : int ): A = [[0 for i in range(snake_case__ )] for j in range(snake_case__ )] for i in range(snake_case__ ): for j in range(snake_case__ ): A = 1 if open_knight_tour_helper(snake_case__ , (i, j) , 1 ): return board A = 0 A = F'Open Kight Tour cannot be performed on a board of size {n}' raise ValueError(snake_case__ ) if __name__ == "__main__": import doctest doctest.testmod()
91
1
"""simple docstring""" from __future__ import annotations def _snake_case ( snake_case__ : list ): if len(snake_case__ ) == 0: return [] A , A = min(snake_case__ ), max(snake_case__ ) A = int(max_value - min_value ) + 1 A = [[] for _ in range(snake_case__ )] for i in my_list: buckets[int(i - min_value )].append(snake_case__ ) return [v for bucket in buckets for v in sorted(snake_case__ )] if __name__ == "__main__": from doctest import testmod testmod() assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
91
"""simple docstring""" import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Optional[int] = BlenderbotSmallTokenizer _lowerCamelCase: List[Any] = False def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: super().setUp() A = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__'] A = dict(zip(A_ ,range(len(A_ ) ) ) ) A = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', ''] A = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'} A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp: fp.write(json.dumps(A_ ) + '\n' ) with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp: fp.write('\n'.join(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,**A_ : Union[str, Any] ) -> Optional[int]: kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Tuple ) -> List[Any]: A = 'adapt act apte' A = 'adapt act apte' return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]: A = BlenderbotSmallTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map ) A = 'adapt act apte' A = ['adapt', 'act', 'ap@@', 'te'] A = tokenizer.tokenize(A_ ) self.assertListEqual(A_ ,A_ ) A = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] A = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: A = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' ) assert tok('sam' ).input_ids == [1384] A = 'I am a small frog.' A = tok([src_text] ,padding=A_ ,truncation=A_ )['input_ids'] A = tok.batch_decode(A_ ,skip_special_tokens=A_ ,clean_up_tokenization_spaces=A_ )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: A = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' ) A = 'I am a small frog .' A = '.' A = tok(A_ )['input_ids'] A = tok(A_ )['input_ids'] assert encoded[-1] == encoded_dot[0]
91
1
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_download, hf_hub_url from PIL import Image from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() _lowercase = logging.get_logger(__name__) def _snake_case ( snake_case__ : int ): A = SwinConfig( embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['stage2', 'stage3', 'stage4'] , ) A = DetaConfig( backbone_config=snake_case__ , num_queries=900 , encoder_ffn_dim=2048 , decoder_ffn_dim=2048 , num_feature_levels=5 , assign_first_stage=snake_case__ , with_box_refine=snake_case__ , two_stage=snake_case__ , ) # set labels A = 'huggingface/label-files' if "o365" in model_name: A = 366 A = 'object365-id2label.json' else: A = 91 A = 'coco-detection-id2label.json' A = num_labels A = json.load(open(cached_download(hf_hub_url(snake_case__ , snake_case__ , repo_type='dataset' ) ) , 'r' ) ) A = {int(snake_case__ ): v for k, v in idalabel.items()} A = idalabel A = {v: k for k, v in idalabel.items()} return config def _snake_case ( snake_case__ : Any ): A = [] # stem # fmt: off rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') ) rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') ) rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') ) rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') ) rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') ) rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') ) rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') ) rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') ) rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') ) rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') ) rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') ) rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') ) rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') ) rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') ) rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') ) if i < 3: rename_keys.append((F'backbone.0.body.layers.{i}.downsample.reduction.weight', F'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') ) rename_keys.append((F'backbone.0.body.layers.{i}.downsample.norm.weight', F'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') ) rename_keys.append((F'backbone.0.body.layers.{i}.downsample.norm.bias', F'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') ) rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') ) rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') ) rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') ) rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') ) rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') ) rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') ) # transformer encoder for i in range(config.encoder_layers ): rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', F'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', F'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', F'model.encoder.layers.{i}.self_attn.attention_weights.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', F'model.encoder.layers.{i}.self_attn.attention_weights.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.value_proj.weight', F'model.encoder.layers.{i}.self_attn.value_proj.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.value_proj.bias', F'model.encoder.layers.{i}.self_attn.value_proj.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.output_proj.weight', F'model.encoder.layers.{i}.self_attn.output_proj.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.output_proj.bias', F'model.encoder.layers.{i}.self_attn.output_proj.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.norm1.weight', F'model.encoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'model.encoder.layers.{i}.self_attn_layer_norm.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'model.encoder.layers.{i}.fc1.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'model.encoder.layers.{i}.fc1.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'model.encoder.layers.{i}.fc2.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'model.encoder.layers.{i}.fc2.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'model.encoder.layers.{i}.final_layer_norm.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'model.encoder.layers.{i}.final_layer_norm.bias') ) # transformer decoder for i in range(config.decoder_layers ): rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', F'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', F'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', F'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', F'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', F'model.decoder.layers.{i}.encoder_attn.value_proj.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', F'model.decoder.layers.{i}.encoder_attn.value_proj.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', F'model.decoder.layers.{i}.encoder_attn.output_proj.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', F'model.decoder.layers.{i}.encoder_attn.output_proj.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm1.weight', F'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'model.decoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'model.decoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm2.weight', F'model.decoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm2.bias', F'model.decoder.layers.{i}.self_attn_layer_norm.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'model.decoder.layers.{i}.fc1.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'model.decoder.layers.{i}.fc1.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'model.decoder.layers.{i}.fc2.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'model.decoder.layers.{i}.fc2.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'model.decoder.layers.{i}.final_layer_norm.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'model.decoder.layers.{i}.final_layer_norm.bias') ) # fmt: on return rename_keys def _snake_case ( snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : Any ): A = dct.pop(snake_case__ ) A = val def _snake_case ( snake_case__ : Any , snake_case__ : Optional[int] ): A = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): A = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) A = state_dict.pop(F'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight' ) A = state_dict.pop(F'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict A = in_proj_weight[:dim, :] A = in_proj_bias[: dim] A = in_proj_weight[ dim : dim * 2, : ] A = in_proj_bias[ dim : dim * 2 ] A = in_proj_weight[ -dim :, : ] A = in_proj_bias[-dim :] # fmt: on def _snake_case ( snake_case__ : Tuple , snake_case__ : Union[str, Any] ): # transformer decoder self-attention layers A = config.d_model for i in range(config.decoder_layers ): # read in weights + bias of input projection layer of self-attention A = state_dict.pop(F'transformer.decoder.layers.{i}.self_attn.in_proj_weight' ) A = state_dict.pop(F'transformer.decoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict A = in_proj_weight[:hidden_size, :] A = in_proj_bias[:hidden_size] A = in_proj_weight[ hidden_size : hidden_size * 2, : ] A = in_proj_bias[hidden_size : hidden_size * 2] A = in_proj_weight[-hidden_size:, :] A = in_proj_bias[-hidden_size:] def _snake_case ( ): A = 'http://images.cocodataset.org/val2017/000000039769.jpg' A = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) return im @torch.no_grad() def _snake_case ( snake_case__ : Dict , snake_case__ : Dict , snake_case__ : List[str] ): A = get_deta_config(snake_case__ ) # load original state dict if model_name == "deta-swin-large": A = hf_hub_download(repo_id='nielsr/deta-checkpoints' , filename='adet_swin_ft.pth' ) elif model_name == "deta-swin-large-o365": A = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365' , filename='deta_swin_pt_o365.pth' ) else: raise ValueError(F'Model name {model_name} not supported' ) A = torch.load(snake_case__ , map_location='cpu' )['model'] # original state dict for name, param in state_dict.items(): print(snake_case__ , param.shape ) # rename keys A = create_rename_keys(snake_case__ ) for src, dest in rename_keys: rename_key(snake_case__ , snake_case__ , snake_case__ ) read_in_swin_q_k_v(snake_case__ , config.backbone_config ) read_in_decoder_q_k_v(snake_case__ , snake_case__ ) # fix some prefixes for key in state_dict.copy().keys(): if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key: A = state_dict.pop(snake_case__ ) A = val if "input_proj" in key: A = state_dict.pop(snake_case__ ) A = val if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key: A = state_dict.pop(snake_case__ ) A = val # finally, create HuggingFace model and load state dict A = DetaForObjectDetection(snake_case__ ) model.load_state_dict(snake_case__ ) model.eval() A = 'cuda' if torch.cuda.is_available() else 'cpu' model.to(snake_case__ ) # load image processor A = DetaImageProcessor(format='coco_detection' ) # verify our conversion on image A = prepare_img() A = processor(images=snake_case__ , return_tensors='pt' ) A = encoding['pixel_values'] A = model(pixel_values.to(snake_case__ ) ) # verify logits print('Logits:' , outputs.logits[0, :3, :3] ) print('Boxes:' , outputs.pred_boxes[0, :3, :3] ) if model_name == "deta-swin-large": A = torch.tensor( [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] ) A = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] ) elif model_name == "deta-swin-large-o365": A = torch.tensor( [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] ) A = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] ) assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(snake_case__ ) , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(snake_case__ ) , atol=1e-4 ) print('Everything ok!' ) if pytorch_dump_folder_path: # Save model and processor logger.info(F'Saving PyTorch model and processor to {pytorch_dump_folder_path}...' ) Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) model.save_pretrained(snake_case__ ) processor.save_pretrained(snake_case__ ) # Push to hub if push_to_hub: print('Pushing model and processor to hub...' ) model.push_to_hub(F'jozhang97/{model_name}' ) processor.push_to_hub(F'jozhang97/{model_name}' ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() parser.add_argument( '''--model_name''', type=str, default='''deta-swin-large''', choices=['''deta-swin-large''', '''deta-swin-large-o365'''], help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) _lowercase = parser.parse_args() convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
91
"""simple docstring""" from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: List[str] = ['''image_processor''', '''tokenizer'''] _lowerCamelCase: Optional[int] = '''Pix2StructImageProcessor''' _lowerCamelCase: Dict = ('''T5Tokenizer''', '''T5TokenizerFast''') def __init__( self : Optional[int] ,A_ : List[str] ,A_ : Optional[int] ) -> int: A = False super().__init__(A_ ,A_ ) def __call__( self : Any ,A_ : List[str]=None ,A_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,A_ : bool = True ,A_ : Union[bool, str, PaddingStrategy] = False ,A_ : Union[bool, str, TruncationStrategy] = None ,A_ : Optional[int] = None ,A_ : Optional[int] = 2048 ,A_ : int = 0 ,A_ : Optional[int] = None ,A_ : Optional[bool] = None ,A_ : bool = False ,A_ : bool = False ,A_ : bool = False ,A_ : bool = False ,A_ : bool = False ,A_ : bool = True ,A_ : Optional[Union[str, TensorType]] = None ,**A_ : Tuple ,) -> BatchEncoding: if images is None and text is None: raise ValueError('You have to specify either images or text.' ) # Get only text if images is None and not self.image_processor.is_vqa: A = self.tokenizer A = self.tokenizer( text=A_ ,add_special_tokens=A_ ,padding=A_ ,truncation=A_ ,max_length=A_ ,stride=A_ ,pad_to_multiple_of=A_ ,return_attention_mask=A_ ,return_overflowing_tokens=A_ ,return_special_tokens_mask=A_ ,return_offsets_mapping=A_ ,return_token_type_ids=A_ ,return_length=A_ ,verbose=A_ ,return_tensors=A_ ,**A_ ,) return text_encoding if not self.image_processor.is_vqa: # add pixel_values A = self.image_processor( A_ ,return_tensors=A_ ,max_patches=A_ ,**A_ ) else: # add pixel_values and bbox A = self.image_processor( A_ ,return_tensors=A_ ,max_patches=A_ ,header_text=A_ ,**A_ ) if text is not None and not self.image_processor.is_vqa: A = self.tokenizer( text=A_ ,add_special_tokens=A_ ,padding=A_ ,truncation=A_ ,max_length=A_ ,stride=A_ ,pad_to_multiple_of=A_ ,return_attention_mask=A_ ,return_overflowing_tokens=A_ ,return_special_tokens_mask=A_ ,return_offsets_mapping=A_ ,return_token_type_ids=A_ ,return_length=A_ ,verbose=A_ ,return_tensors=A_ ,**A_ ,) if "attention_mask" in text_encoding: A = text_encoding.pop('attention_mask' ) if "input_ids" in text_encoding: A = text_encoding.pop('input_ids' ) else: A = None if text_encoding is not None: encoding_image_processor.update(A_ ) return encoding_image_processor def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,*A_ : Optional[Any] ,**A_ : Dict ) -> Union[str, Any]: return self.tokenizer.batch_decode(*A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,*A_ : Tuple ,**A_ : List[str] ) -> Any: return self.tokenizer.decode(*A_ ,**A_ ) @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: A = self.tokenizer.model_input_names A = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
91
1
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process _lowercase = logging.getLogger(__name__) @dataclass class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: str = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) _lowerCamelCase: Optional[str] = field( default=_lowercase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) _lowerCamelCase: Optional[str] = field( default='''NER''' , metadata={'''help''': '''Task type to fine tune in training (e.g. NER, POS, etc)'''} ) _lowerCamelCase: Optional[str] = field( default=_lowercase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) _lowerCamelCase: bool = field(default=_lowercase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _lowerCamelCase: Optional[str] = field( default=_lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) @dataclass class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: str = field( metadata={'''help''': '''The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'''} ) _lowerCamelCase: Optional[str] = field( default=_lowercase , metadata={'''help''': '''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'''} , ) _lowerCamelCase: int = field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) _lowerCamelCase: bool = field( default=_lowercase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) def _snake_case ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. A , A , A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: A , A , A = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. Use' ' --overwrite_output_dir to overcome.' ) A = import_module('tasks' ) try: A = getattr(snake_case__ , model_args.task_type ) A = token_classification_task_clazz() except AttributeError: raise ValueError( F'Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. ' F'Available tasks classes are: {TokenClassificationTask.__subclasses__()}' ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , snake_case__ ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task A = token_classification_task.get_labels(data_args.labels ) A = dict(enumerate(snake_case__ ) ) A = len(snake_case__ ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. A = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=snake_case__ , idalabel=snake_case__ , labelaid={label: i for i, label in enumerate(snake_case__ )} , cache_dir=model_args.cache_dir , ) A = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , ) A = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=snake_case__ , cache_dir=model_args.cache_dir , ) # Get datasets A = ( TokenClassificationDataset( token_classification_task=snake_case__ , data_dir=data_args.data_dir , tokenizer=snake_case__ , labels=snake_case__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) A = ( TokenClassificationDataset( token_classification_task=snake_case__ , data_dir=data_args.data_dir , tokenizer=snake_case__ , labels=snake_case__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def align_predictions(snake_case__ : np.ndarray , snake_case__ : np.ndarray ) -> Tuple[List[int], List[int]]: A = np.argmax(snake_case__ , axis=2 ) A , A = preds.shape A = [[] for _ in range(snake_case__ )] A = [[] for _ in range(snake_case__ )] for i in range(snake_case__ ): for j in range(snake_case__ ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(snake_case__ : EvalPrediction ) -> Dict: A , A = align_predictions(p.predictions , p.label_ids ) return { "accuracy_score": accuracy_score(snake_case__ , snake_case__ ), "precision": precision_score(snake_case__ , snake_case__ ), "recall": recall_score(snake_case__ , snake_case__ ), "f1": fa_score(snake_case__ , snake_case__ ), } # Data collator A = DataCollatorWithPadding(snake_case__ , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer A = Trainer( model=snake_case__ , args=snake_case__ , train_dataset=snake_case__ , eval_dataset=snake_case__ , compute_metrics=snake_case__ , data_collator=snake_case__ , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation A = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) A = trainer.evaluate() A = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_process_zero(): with open(snake_case__ , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' , snake_case__ , snake_case__ ) writer.write('%s = %s\n' % (key, value) ) results.update(snake_case__ ) # Predict if training_args.do_predict: A = TokenClassificationDataset( token_classification_task=snake_case__ , data_dir=data_args.data_dir , tokenizer=snake_case__ , labels=snake_case__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , ) A , A , A = trainer.predict(snake_case__ ) A , A = align_predictions(snake_case__ , snake_case__ ) A = os.path.join(training_args.output_dir , 'test_results.txt' ) if trainer.is_world_process_zero(): with open(snake_case__ , 'w' ) as writer: for key, value in metrics.items(): logger.info(' %s = %s' , snake_case__ , snake_case__ ) writer.write('%s = %s\n' % (key, value) ) # Save predictions A = os.path.join(training_args.output_dir , 'test_predictions.txt' ) if trainer.is_world_process_zero(): with open(snake_case__ , 'w' ) as writer: with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f: token_classification_task.write_predictions_to_file(snake_case__ , snake_case__ , snake_case__ ) return results def _snake_case ( snake_case__ : Any ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
91
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = '''▁''' _lowercase = {'''vocab_file''': '''sentencepiece.bpe.model'''} _lowercase = { '''vocab_file''': { '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''', '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''', '''xlm-roberta-large-finetuned-conll02-dutch''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll02-spanish''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll03-english''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll03-german''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model''' ), } } _lowercase = { '''xlm-roberta-base''': 5_12, '''xlm-roberta-large''': 5_12, '''xlm-roberta-large-finetuned-conll02-dutch''': 5_12, '''xlm-roberta-large-finetuned-conll02-spanish''': 5_12, '''xlm-roberta-large-finetuned-conll03-english''': 5_12, '''xlm-roberta-large-finetuned-conll03-german''': 5_12, } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = VOCAB_FILES_NAMES _lowerCamelCase: List[str] = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase: Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase: Any = ['''input_ids''', '''attention_mask'''] def __init__( self : Union[str, Any] ,A_ : str ,A_ : str="<s>" ,A_ : Any="</s>" ,A_ : Tuple="</s>" ,A_ : Any="<s>" ,A_ : Optional[Any]="<unk>" ,A_ : int="<pad>" ,A_ : str="<mask>" ,A_ : Optional[Dict[str, Any]] = None ,**A_ : Optional[int] ,) -> None: # Mask token behave like a normal word, i.e. include the space before it A = AddedToken(A_ ,lstrip=A_ ,rstrip=A_ ) if isinstance(A_ ,A_ ) else mask_token A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A_ ,eos_token=A_ ,unk_token=A_ ,sep_token=A_ ,cls_token=A_ ,pad_token=A_ ,mask_token=A_ ,sp_model_kwargs=self.sp_model_kwargs ,**A_ ,) A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(A_ ) ) A = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token A = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab A = 1 A = len(self.sp_model ) + self.fairseq_offset A = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Union[str, Any] ) -> Any: A = self.__dict__.copy() A = None A = self.sp_model.serialized_model_proto() return state def __setstate__( self : str ,A_ : str ) -> Optional[Any]: A = d # for backward compatibility if not hasattr(self ,'sp_model_kwargs' ): A = {} A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A = [self.cls_token_id] A = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[int] ,A_ : Optional[List[int]] = None ,A_ : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A_ ,token_ids_a=A_ ,already_has_special_tokens=A_ ) if token_ids_a is None: return [1] + ([0] * len(A_ )) + [1] return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1] def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]: A = [self.sep_token_id] A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: A = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> List[str]: return self.sp_model.encode(A_ ,out_type=A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[Any] ) -> Tuple: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] A = self.sp_model.PieceToId(A_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> int: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ) -> List[Any]: A = ''.join(A_ ).replace(A_ ,' ' ).strip() return out_string def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(A_ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return A = os.path.join( A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,A_ ) elif not os.path.isfile(self.vocab_file ): with open(A_ ,'wb' ) as fi: A = self.sp_model.serialized_model_proto() fi.write(A_ ) return (out_vocab_file,)
91
1
"""simple docstring""" import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler _lowercase = 16 _lowercase = 32 def _snake_case ( snake_case__ : Accelerator , snake_case__ : int = 16 , snake_case__ : str = "bert-base-cased" ): A = AutoTokenizer.from_pretrained(snake_case__ ) A = load_dataset('glue' , 'mrpc' ) def tokenize_function(snake_case__ : int ): # max_length=None => use the model max length (it's actually the default) A = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=snake_case__ , max_length=snake_case__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset A = datasets.map( snake_case__ , batched=snake_case__ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=snake_case__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library A = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(snake_case__ : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(snake_case__ , padding='max_length' , max_length=128 , return_tensors='pt' ) return tokenizer.pad(snake_case__ , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. A = DataLoader( tokenized_datasets['train'] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) A = DataLoader( tokenized_datasets['validation'] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) return train_dataloader, eval_dataloader def _snake_case ( snake_case__ : int , snake_case__ : Union[str, Any] ): # Initialize accelerator A = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs A = config['lr'] A = int(config['num_epochs'] ) A = int(config['seed'] ) A = int(config['batch_size'] ) A = args.model_name_or_path set_seed(snake_case__ ) A , A = get_dataloaders(snake_case__ , snake_case__ , snake_case__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) A = AutoModelForSequenceClassification.from_pretrained(snake_case__ , return_dict=snake_case__ ) # Instantiate optimizer A = ( AdamW if accelerator.state.deepspeed_plugin is None or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) A = optimizer_cls(params=model.parameters() , lr=snake_case__ ) if accelerator.state.deepspeed_plugin is not None: A = accelerator.state.deepspeed_plugin.deepspeed_config[ 'gradient_accumulation_steps' ] else: A = 1 A = (len(snake_case__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): A = get_linear_schedule_with_warmup( optimizer=snake_case__ , num_warmup_steps=0 , num_training_steps=snake_case__ , ) else: A = DummyScheduler(snake_case__ , total_num_steps=snake_case__ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. A , A , A , A , A = accelerator.prepare( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # We need to keep track of how many total steps we have iterated over A = 0 # We also need to keep track of the stating epoch so files are named properly A = 0 # Now we train the model A = evaluate.load('glue' , 'mrpc' ) A = 0 A = {} for epoch in range(snake_case__ , snake_case__ ): model.train() for step, batch in enumerate(snake_case__ ): A = model(**snake_case__ ) A = outputs.loss A = loss / gradient_accumulation_steps accelerator.backward(snake_case__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() A = 0 for step, batch in enumerate(snake_case__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): A = model(**snake_case__ ) A = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times A , A = accelerator.gather( (predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(snake_case__ ) - 1: A = predictions[: len(eval_dataloader.dataset ) - samples_seen] A = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=snake_case__ , references=snake_case__ , ) A = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'epoch {epoch}:' , snake_case__ ) A = eval_metric['accuracy'] if best_performance < eval_metric["accuracy"]: A = eval_metric['accuracy'] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), F'Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}' accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f: json.dump(snake_case__ , snake_case__ ) def _snake_case ( ): A = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' ) parser.add_argument( '--model_name_or_path' , type=snake_case__ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=snake_case__ , ) parser.add_argument( '--output_dir' , type=snake_case__ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , ) parser.add_argument( '--performance_lower_bound' , type=snake_case__ , default=snake_case__ , help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' , ) parser.add_argument( '--num_epochs' , type=snake_case__ , default=3 , help='Number of train epochs.' , ) A = parser.parse_args() A = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16} training_function(snake_case__ , snake_case__ ) if __name__ == "__main__": main()
91
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase = { '''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''], '''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''], '''processing_mctct''': ['''MCTCTProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MCTCTForCTC''', '''MCTCTModel''', '''MCTCTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
91
1
import torch from torch import nn class lowerCamelCase_ ( nn.Module ): def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=1 , __lowerCAmelCase=False ): """simple docstring""" super().__init__() __magic_name__ :Union[str, Any] = n_token __magic_name__ :Union[str, Any] = d_embed __magic_name__ :int = d_proj __magic_name__ :List[Any] = cutoffs + [n_token] __magic_name__ :str = [0] + self.cutoffs __magic_name__ :int = div_val __magic_name__ :Any = self.cutoffs[0] __magic_name__ :Optional[int] = len(self.cutoffs ) - 1 __magic_name__ :Union[str, Any] = self.shortlist_size + self.n_clusters if self.n_clusters > 0: __magic_name__ :str = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) ) __magic_name__ :Tuple = nn.Parameter(torch.zeros(self.n_clusters ) ) __magic_name__ :Union[str, Any] = nn.ModuleList() __magic_name__ :Any = nn.ParameterList() if div_val == 1: for i in range(len(self.cutoffs ) ): if d_proj != d_embed: self.out_projs.append(nn.Parameter(torch.FloatTensor(__lowerCAmelCase , __lowerCAmelCase ) ) ) else: self.out_projs.append(__lowerCAmelCase ) self.out_layers.append(nn.Linear(__lowerCAmelCase , __lowerCAmelCase ) ) else: for i in range(len(self.cutoffs ) ): __magic_name__ , __magic_name__ :str = self.cutoff_ends[i], self.cutoff_ends[i + 1] __magic_name__ :Any = d_embed // (div_val**i) self.out_projs.append(nn.Parameter(torch.FloatTensor(__lowerCAmelCase , __lowerCAmelCase ) ) ) self.out_layers.append(nn.Linear(__lowerCAmelCase , r_idx - l_idx ) ) __magic_name__ :List[str] = keep_order def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" if proj is None: __magic_name__ :Any = nn.functional.linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase ) else: # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1: __magic_name__ :Any = nn.functional.linear(__lowerCAmelCase , proj.t().contiguous() ) __magic_name__ :str = nn.functional.linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase ) # else: # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t())) # if bias is not None: # logit = logit + bias return logit def A ( self , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=False ): """simple docstring""" if labels is not None: # Shift so that tokens < n predict n __magic_name__ :List[str] = hidden[..., :-1, :].contiguous() __magic_name__ :Dict = labels[..., 1:].contiguous() __magic_name__ :Any = hidden.view(-1 , hidden.size(-1 ) ) __magic_name__ :Optional[int] = labels.view(-1 ) if hidden.size(0 ) != labels.size(0 ): raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' ) else: __magic_name__ :Union[str, Any] = hidden.view(-1 , hidden.size(-1 ) ) if self.n_clusters == 0: __magic_name__ :int = self._compute_logit(__lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] ) if labels is not None: __magic_name__ :Optional[Any] = labels != -1_0_0 __magic_name__ :int = torch.zeros_like(__lowerCAmelCase , dtype=hidden.dtype , device=hidden.device ) __magic_name__ :Dict = ( -nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 ) ) else: __magic_name__ :str = nn.functional.log_softmax(__lowerCAmelCase , dim=-1 ) else: # construct weights and biases __magic_name__ , __magic_name__ :List[Any] = [], [] for i in range(len(self.cutoffs ) ): if self.div_val == 1: __magic_name__ , __magic_name__ :List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1] __magic_name__ :Optional[Any] = self.out_layers[0].weight[l_idx:r_idx] __magic_name__ :Optional[int] = self.out_layers[0].bias[l_idx:r_idx] else: __magic_name__ :List[str] = self.out_layers[i].weight __magic_name__ :Union[str, Any] = self.out_layers[i].bias if i == 0: __magic_name__ :List[str] = torch.cat([weight_i, self.cluster_weight] , dim=0 ) __magic_name__ :int = torch.cat([bias_i, self.cluster_bias] , dim=0 ) weights.append(__lowerCAmelCase ) biases.append(__lowerCAmelCase ) __magic_name__ , __magic_name__ , __magic_name__ :int = weights[0], biases[0], self.out_projs[0] __magic_name__ :List[Any] = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :List[Any] = nn.functional.log_softmax(__lowerCAmelCase , dim=1 ) if labels is None: __magic_name__ :str = hidden.new_empty((head_logit.size(0 ), self.n_token) ) else: __magic_name__ :int = torch.zeros_like(__lowerCAmelCase , dtype=hidden.dtype , device=hidden.device ) __magic_name__ :Tuple = 0 __magic_name__ :Optional[Any] = [0] + self.cutoffs for i in range(len(__lowerCAmelCase ) - 1 ): __magic_name__ , __magic_name__ :str = cutoff_values[i], cutoff_values[i + 1] if labels is not None: __magic_name__ :Tuple = (labels >= l_idx) & (labels < r_idx) __magic_name__ :Optional[Any] = mask_i.nonzero().squeeze() if indices_i.numel() == 0: continue __magic_name__ :Union[str, Any] = labels.index_select(0 , __lowerCAmelCase ) - l_idx __magic_name__ :Tuple = head_logprob.index_select(0 , __lowerCAmelCase ) __magic_name__ :List[Any] = hidden.index_select(0 , __lowerCAmelCase ) else: __magic_name__ :Any = hidden if i == 0: if labels is not None: __magic_name__ :Optional[Any] = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 ) else: __magic_name__ :List[Any] = head_logprob[:, : self.cutoffs[0]] else: __magic_name__ , __magic_name__ , __magic_name__ :Optional[int] = weights[i], biases[i], self.out_projs[i] __magic_name__ :Union[str, Any] = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :Union[str, Any] = nn.functional.log_softmax(__lowerCAmelCase , dim=1 ) __magic_name__ :Dict = self.cutoffs[0] + i - 1 # No probability for the head cluster if labels is not None: __magic_name__ :Union[str, Any] = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather( 1 , target_i[:, None] ).squeeze(1 ) else: __magic_name__ :Union[str, Any] = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i __magic_name__ :int = logprob_i if labels is not None: if (hasattr(self , '''keep_order''' ) and self.keep_order) or keep_order: out.index_copy_(0 , __lowerCAmelCase , -logprob_i ) else: out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i ) offset += logprob_i.size(0 ) return out def A ( self , __lowerCAmelCase ): """simple docstring""" if self.n_clusters == 0: __magic_name__ :Optional[Any] = self._compute_logit(__lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] ) return nn.functional.log_softmax(__lowerCAmelCase , dim=-1 ) else: # construct weights and biases __magic_name__ , __magic_name__ :List[str] = [], [] for i in range(len(self.cutoffs ) ): if self.div_val == 1: __magic_name__ , __magic_name__ :Any = self.cutoff_ends[i], self.cutoff_ends[i + 1] __magic_name__ :Optional[Any] = self.out_layers[0].weight[l_idx:r_idx] __magic_name__ :str = self.out_layers[0].bias[l_idx:r_idx] else: __magic_name__ :Optional[int] = self.out_layers[i].weight __magic_name__ :List[str] = self.out_layers[i].bias if i == 0: __magic_name__ :Union[str, Any] = torch.cat([weight_i, self.cluster_weight] , dim=0 ) __magic_name__ :Dict = torch.cat([bias_i, self.cluster_bias] , dim=0 ) weights.append(__lowerCAmelCase ) biases.append(__lowerCAmelCase ) __magic_name__ , __magic_name__ , __magic_name__ :str = weights[0], biases[0], self.out_projs[0] __magic_name__ :Dict = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :Optional[int] = hidden.new_empty((head_logit.size(0 ), self.n_token) ) __magic_name__ :Tuple = nn.functional.log_softmax(__lowerCAmelCase , dim=1 ) __magic_name__ :str = [0] + self.cutoffs for i in range(len(__lowerCAmelCase ) - 1 ): __magic_name__ , __magic_name__ :List[str] = cutoff_values[i], cutoff_values[i + 1] if i == 0: __magic_name__ :Tuple = head_logprob[:, : self.cutoffs[0]] else: __magic_name__ , __magic_name__ , __magic_name__ :Any = weights[i], biases[i], self.out_projs[i] __magic_name__ :Union[str, Any] = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) __magic_name__ :Optional[Any] = nn.functional.log_softmax(__lowerCAmelCase , dim=1 ) __magic_name__ :Any = head_logprob[:, -i] + tail_logprob_i __magic_name__ :Union[str, Any] = logprob_i return out
0
"""simple docstring""" from torch import nn def _snake_case ( snake_case__ : Union[str, Any] ): if act_fn in ["swish", "silu"]: return nn.SiLU() elif act_fn == "mish": return nn.Mish() elif act_fn == "gelu": return nn.GELU() else: raise ValueError(F'Unsupported activation function: {act_fn}' )
91
0
import warnings from ...utils import logging from .image_processing_glpn import GLPNImageProcessor __snake_case = logging.get_logger(__name__) class __lowerCamelCase (_a ): def __init__( self: List[str],*A_: Dict,**A_: Tuple ): '''simple docstring''' warnings.warn( 'The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use GLPNImageProcessor instead.',A_,) super().__init__(*A_,**A_ )
1
"""simple docstring""" import copy import re class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: str = '''hp''' _lowerCamelCase: List[Any] = {} _lowerCamelCase: List[Any] = None @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,A_ : List[str] ,A_ : Optional[Any] ) -> Tuple: A = prefix A = defaults cls.build_naming_info() @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : Any ,A_ : List[Any] ) -> int: if len(A_ ) == 0: return "" A = None if any(char.isdigit() for char in word ): raise Exception(F'Parameters should not contain numbers: \'{word}\' contains a number' ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 ,len(A_ ) + 1 ): A = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: A = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(A_ : Optional[Any] ): A = '' while integer != 0: A = chr(ord('A' ) + integer % 10 ) + s integer //= 10 return s A = 0 while True: A = word + '#' + int_to_alphabetic(A_ ) if sword in info["reverse_short_word"]: continue else: A = sword break A = short_word A = word return short_word @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : List[Any] ,A_ : Union[str, Any] ) -> Union[str, Any]: A = param_name.split('_' ) A = [TrialShortNamer.shortname_for_word(A_ ,A_ ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name A = ['', '_'] for separator in separators: A = separator.join(A_ ) if shortname not in info["reverse_short_param"]: A = shortname A = param_name return shortname return param_name @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : List[Any] ,A_ : Any ) -> Tuple: A = TrialShortNamer.shortname_for_key(A_ ,A_ ) A = short_name A = param_name @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict ) -> List[Any]: if cls.NAMING_INFO is not None: return A = { 'short_word': {}, 'reverse_short_word': {}, 'short_param': {}, 'reverse_short_param': {}, } A = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(A_ ,A_ ) A = info @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,A_ : Union[str, Any] ) -> Union[str, Any]: cls.build_naming_info() assert cls.PREFIX is not None A = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(F'You should provide a default value for the param name {k} with value {v}' ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue A = cls.NAMING_INFO['short_param'][k] if isinstance(A_ ,A_ ): A = 1 if v else 0 A = '' if isinstance(A_ ,(int, float) ) else '-' A = F'{key}{sep}{v}' name.append(A_ ) return "_".join(A_ ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[str] ,A_ : Any ) -> int: A = repr[len(cls.PREFIX ) + 1 :] if repr == "": A = [] else: A = repr.split('_' ) A = {} for value in values: if "-" in value: A , A = value.split('-' ) else: A = re.sub('[0-9.]' ,'' ,A_ ) A = float(re.sub('[^0-9.]' ,'' ,A_ ) ) A = cls.NAMING_INFO['reverse_short_param'][p_k] A = p_v for k in cls.DEFAULTS: if k not in parameters: A = cls.DEFAULTS[k] return parameters
91
0
from functools import reduce UpperCAmelCase_ = ( """73167176531330624919225119674426574742355349194934""" """96983520312774506326239578318016984801869478851843""" """85861560789112949495459501737958331952853208805511""" """12540698747158523863050715693290963295227443043557""" """66896648950445244523161731856403098711121722383113""" """62229893423380308135336276614282806444486645238749""" """30358907296290491560440772390713810515859307960866""" """70172427121883998797908792274921901699720888093776""" """65727333001053367881220235421809751254540594752243""" """52584907711670556013604839586446706324415722155397""" """53697817977846174064955149290862569321978468622482""" """83972241375657056057490261407972968652414535100474""" """82166370484403199890008895243450658541227588666881""" """16427171479924442928230863465674813919123162824586""" """17866458359124566529476545682848912883142607690042""" """24219022671055626321111109370544217506941658960408""" """07198403850962455444362981230987879927244284909188""" """84580156166097919133875499200524063689912560717606""" """05886116467109405077541002256983155200055935729725""" """71636269561882670428252483600823257530420752963450""" ) def SCREAMING_SNAKE_CASE_ ( _snake_case :str = N ) -> int: return max( # mypy cannot properly interpret reduce int(reduce(lambda _snake_case , _snake_case : str(int(_snake_case ) * int(_snake_case ) ) , n[i : i + 13] ) ) for i in range(len(_snake_case ) - 12 ) ) if __name__ == "__main__": print(f'{solution() = }')
2
"""simple docstring""" import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def _snake_case ( ): with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(snake_case__ ): requests.request('GET' , 'https://huggingface.co' ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request('GET' , 'https://huggingface.co' , timeout=1.0 ) @pytest.mark.integration def _snake_case ( ): with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request('GET' , 'https://huggingface.co' ) def _snake_case ( ): with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(snake_case__ ): http_head('https://huggingface.co' )
91
0
'''simple docstring''' import argparse import json import os import torch from transformers.file_utils import has_file from diffusers import UNetaDConditionModel, UNetaDModel lowerCAmelCase : Union[str, Any] = False lowerCAmelCase : Optional[int] = True lowerCAmelCase : Union[str, Any] = False if __name__ == "__main__": lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument( '--repo_path', default=None, type=str, required=True, help='The config json file corresponding to the architecture.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') lowerCAmelCase : Tuple = parser.parse_args() lowerCAmelCase : Dict = { 'image_size': 'sample_size', 'num_res_blocks': 'layers_per_block', 'block_channels': 'block_out_channels', 'down_blocks': 'down_block_types', 'up_blocks': 'up_block_types', 'downscale_freq_shift': 'freq_shift', 'resnet_num_groups': 'norm_num_groups', 'resnet_act_fn': 'act_fn', 'resnet_eps': 'norm_eps', 'num_head_channels': 'attention_head_dim', } lowerCAmelCase : str = { 'time_steps': 'time_proj', 'mid': 'mid_block', 'downsample_blocks': 'down_blocks', 'upsample_blocks': 'up_blocks', } lowerCAmelCase : Optional[int] = '' if has_file(args.repo_path, 'config.json') else 'unet' with open(os.path.join(args.repo_path, subfolder, 'config.json'), 'r', encoding='utf-8') as reader: lowerCAmelCase : Any = reader.read() lowerCAmelCase : Dict = json.loads(text) if do_only_config: for key in config_parameters_to_change.keys(): config.pop(key, None) if has_file(args.repo_path, 'config.json'): lowerCAmelCase : List[Any] = UNetaDModel(**config) else: lowerCAmelCase : Any = UNetaDConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNetaDModel lowerCAmelCase : Tuple = class_name(**config) if do_only_config: model.save_config(os.path.join(args.repo_path, subfolder)) lowerCAmelCase : List[Any] = dict(model.config) if do_only_renaming: for key, value in config_parameters_to_change.items(): if key in config: lowerCAmelCase : str = config[key] del config[key] lowerCAmelCase : int = [k.replace('UNetRes', '') for k in config['down_block_types']] lowerCAmelCase : List[Any] = [k.replace('UNetRes', '') for k in config['up_block_types']] if do_only_weights: lowerCAmelCase : List[str] = torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin')) lowerCAmelCase : Any = {} for param_key, param_value in state_dict.items(): if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'): continue lowerCAmelCase : Tuple = False for key, new_key in key_parameters_to_change.items(): if not has_changed and param_key.split('.')[0] == key: lowerCAmelCase : Optional[int] = param_value lowerCAmelCase : Tuple = True if not has_changed: lowerCAmelCase : Dict = param_value model.load_state_dict(new_state_dict) model.save_pretrained(os.path.join(args.repo_path, subfolder))
3
"""simple docstring""" import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: List[str] = BioGptTokenizer _lowerCamelCase: Tuple = False def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>', ] A = dict(zip(A_ ,range(len(A_ ) ) ) ) A = ['l o 123', 'lo w 1456', 'e r</w> 1789', ''] A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ) as fp: fp.write(json.dumps(A_ ) ) with open(self.merges_file ,'w' ) as fp: fp.write('\n'.join(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Tuple ) -> int: A = 'lower newer' A = 'lower newer' return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = BioGptTokenizer(self.vocab_file ,self.merges_file ) A = 'lower' A = ['low', 'er</w>'] A = tokenizer.tokenize(A_ ) self.assertListEqual(A_ ,A_ ) A = tokens + ['<unk>'] A = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) ,A_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: A = BioGptTokenizer.from_pretrained('microsoft/biogpt' ) A = tokenizer.encode('sequence builders' ,add_special_tokens=A_ ) A = tokenizer.encode('multi-sequence build' ,add_special_tokens=A_ ) A = tokenizer.build_inputs_with_special_tokens(A_ ) A = tokenizer.build_inputs_with_special_tokens(A_ ,A_ ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
91
0
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class a ( a__ , unittest.TestCase ): snake_case__ = DiTPipeline snake_case__ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS snake_case__ = PipelineTesterMixin.required_optional_params - { '''latents''', '''num_images_per_prompt''', '''callback''', '''callback_steps''', } snake_case__ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS snake_case__ = False def UpperCamelCase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCAmelCase = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_snake_case , activation_fn='gelu-approximate' , num_embeds_ada_norm=10_00 , norm_type='ada_norm_zero' , norm_elementwise_affine=_snake_case , ) lowerCAmelCase = AutoencoderKL() lowerCAmelCase = DDIMScheduler() lowerCAmelCase = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler} return components def UpperCamelCase__ ( self , _snake_case , _snake_case=0 ): """simple docstring""" if str(_snake_case ).startswith('mps' ): lowerCAmelCase = torch.manual_seed(_snake_case ) else: lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case ) lowerCAmelCase = { 'class_labels': [1], 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = 'cpu' lowerCAmelCase = self.get_dummy_components() lowerCAmelCase = self.pipeline_class(**_snake_case ) pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) lowerCAmelCase = self.get_dummy_inputs(_snake_case ) lowerCAmelCase = pipe(**_snake_case ).images lowerCAmelCase = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) lowerCAmelCase = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] ) lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_snake_case , 1E-3 ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_inference_batch_single_identical(relax_max_difference=_snake_case , expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class a ( unittest.TestCase ): def UpperCamelCase__ ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' ) pipe.to('cuda' ) lowerCAmelCase = ['vase', 'umbrella', 'white shark', 'white wolf'] lowerCAmelCase = pipe.get_label_ids(_snake_case ) lowerCAmelCase = pipe(_snake_case , generator=_snake_case , num_inference_steps=40 , output_type='np' ).images for word, image in zip(_snake_case , _snake_case ): lowerCAmelCase = load_numpy( F'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' ) assert np.abs((expected_image - image).max() ) < 1E-2 def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' ) lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to('cuda' ) lowerCAmelCase = ['vase', 'umbrella'] lowerCAmelCase = pipe.get_label_ids(_snake_case ) lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = pipe(_snake_case , generator=_snake_case , num_inference_steps=25 , output_type='np' ).images for word, image in zip(_snake_case , _snake_case ): lowerCAmelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' F'/dit/{word}_512.npy' ) assert np.abs((expected_image - image).max() ) < 1E-1
4
"""simple docstring""" # HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. # # By default it'll use the lowest result as the base line to use as 100% and then compare the rest to # it as can be seen from the table above, but you can also specify which combination is the one to use as # the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0' # # --target-metric-key is there to tell the program which metrics to compare - the different metric keys are # inside output_dir/all_results.json. e.g., to measure eval performance instead of train use: # --target-metric-key eval_samples_per_second # but of course you will need to adjust the --base-cmd value in the example to perform evaluation as # well (as currently it doesn't) # import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers _lowercase = float('''nan''') class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[str] ,A_ : Tuple ) -> Any: A = sys.stdout A = open(A_ ,'a' ) def __getattr__( self : int ,A_ : Optional[Any] ) -> Tuple: return getattr(self.stdout ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ) -> str: self.stdout.write(A_ ) # strip tqdm codes self.file.write(re.sub(R'^.*\r' ,'' ,A_ ,0 ,re.M ) ) def _snake_case ( snake_case__ : Optional[Any]=80 , snake_case__ : List[str]=False ): A = [] # deal with critical env vars A = ['CUDA_VISIBLE_DEVICES'] for key in env_keys: A = os.environ.get(snake_case__ , snake_case__ ) if val is not None: cmd.append(F'{key}={val}' ) # python executable (not always needed if the script is executable) A = sys.executable if full_python_path else sys.executable.split('/' )[-1] cmd.append(snake_case__ ) # now the normal args cmd += list(map(shlex.quote , sys.argv ) ) # split up into up to MAX_WIDTH lines with shell multi-line escapes A = [] A = '' while len(snake_case__ ) > 0: current_line += F'{cmd.pop(0 )} ' if len(snake_case__ ) == 0 or len(snake_case__ ) + len(cmd[0] ) + 1 > max_width - 1: lines.append(snake_case__ ) A = '' return "\\\n".join(snake_case__ ) def _snake_case ( snake_case__ : str , snake_case__ : str ): # unwrap multi-line input A = re.sub(r'[\\\n]+' , ' ' , args.base_cmd ) # remove --output_dir if any and set our own A = re.sub('--output_dir\s+[^\s]+' , '' , args.base_cmd ) args.base_cmd += F' --output_dir {output_dir}' # ensure we have --overwrite_output_dir A = re.sub('--overwrite_output_dir\s+' , '' , args.base_cmd ) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd ) def _snake_case ( snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : Any , snake_case__ : List[Any] ): # Enable to debug everything but the run itself, to do it fast and see the progress. # This is useful for debugging the output formatting quickly - we can remove it later once # everybody is happy with the output if 0: import random from time import sleep sleep(0 ) return dict( {k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222] )} , ) A = subprocess.run(snake_case__ , capture_output=snake_case__ , text=snake_case__ ) if verbose: print('STDOUT' , result.stdout ) print('STDERR' , result.stderr ) # save the streams A = variation.replace(' ' , '-' ) with open(Path(snake_case__ ) / F'log.{prefix}.stdout.txt' , 'w' ) as f: f.write(result.stdout ) with open(Path(snake_case__ ) / F'log.{prefix}.stderr.txt' , 'w' ) as f: f.write(result.stderr ) if result.returncode != 0: if verbose: print('failed' ) return {target_metric_key: nan} with io.open(F'{output_dir}/all_results.json' , 'r' , encoding='utf-8' ) as f: A = json.load(snake_case__ ) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def _snake_case ( snake_case__ : str , snake_case__ : str , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : Optional[Any] , ): A = [] A = [] A = F'{id}: {variation:<{longest_variation_len}}' A = F'{preamble}: ' A = set(report_metric_keys + [target_metric_key] ) for i in tqdm(range(snake_case__ ) , desc=snake_case__ , leave=snake_case__ ): A = process_run_single( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) A = single_run_metrics[target_metric_key] if not math.isnan(snake_case__ ): metrics.append(snake_case__ ) results.append(snake_case__ ) outcome += "✓" else: outcome += "✘" A = F'\33[2K\r{outcome}' if len(snake_case__ ) > 0: A = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()} A = round(mean_metrics[target_metric_key] , 2 ) A = F'{outcome} {mean_target}' if len(snake_case__ ) > 1: results_str += F' {tuple(round(snake_case__ , 2 ) for x in results )}' print(snake_case__ ) A = variation return mean_metrics else: print(snake_case__ ) return {variation_key: variation, target_metric_key: nan} def _snake_case ( ): A = torch.cuda.get_device_properties(torch.device('cuda' ) ) return F'\nDatetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n' def _snake_case ( snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Union[str, Any] ): A = pd.DataFrame(snake_case__ ) A = 'variation' A = 'diff_%' A = nan if base_variation is not None and len(df[df[variation_key] == base_variation] ): # this may still return nan A = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(snake_case__ ): # as a fallback, use the minimal value as the sentinel A = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(snake_case__ ): A = df.apply( lambda snake_case__ : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value ) if not math.isnan(r[target_metric_key] ) else 0 , axis='columns' , ) # re-order columns A = [variation_key, target_metric_key, diff_key, *report_metric_keys] A = df.reindex(snake_case__ , axis='columns' ) # reorder cols # capitalize A = df.rename(str.capitalize , axis='columns' ) # make the cols as narrow as possible A = df.rename(lambda snake_case__ : c.replace('_' , '<br>' ) , axis='columns' ) A = df.rename(lambda snake_case__ : c.replace('_' , '\n' ) , axis='columns' ) A = ['', 'Copy between the cut-here-lines and paste as is to github or a forum'] report += ["----------8<-----------------8<--------"] report += ["*** Results:", df_github.to_markdown(index=snake_case__ , floatfmt='.2f' )] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=snake_case__ , floatfmt='.2f' )] print('\n\n'.join(snake_case__ ) ) def _snake_case ( ): A = argparse.ArgumentParser() parser.add_argument( '--base-cmd' , default=snake_case__ , type=snake_case__ , required=snake_case__ , help='Base cmd' , ) parser.add_argument( '--variations' , default=snake_case__ , type=snake_case__ , nargs='+' , required=snake_case__ , help='Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'' , ) parser.add_argument( '--base-variation' , default=snake_case__ , type=snake_case__ , help='Baseline variation to compare to. if None the minimal target value will be used to compare against' , ) parser.add_argument( '--target-metric-key' , default=snake_case__ , type=snake_case__ , required=snake_case__ , help='Target metric key in output_dir/all_results.json, e.g., train_samples_per_second' , ) parser.add_argument( '--report-metric-keys' , default='' , type=snake_case__ , help='Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples' , ) parser.add_argument( '--repeat-times' , default=1 , type=snake_case__ , help='How many times to re-run each variation - an average will be reported' , ) parser.add_argument( '--output_dir' , default='output_benchmark' , type=snake_case__ , help='The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked' , ) parser.add_argument( '--verbose' , default=snake_case__ , action='store_true' , help='Whether to show the outputs of each run or just the benchmark progress' , ) A = parser.parse_args() A = args.output_dir Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) A = get_base_command(snake_case__ , snake_case__ ) # split each dimension into its --foo variations A = [list(map(str.strip , re.split(r'\|' , snake_case__ ) ) ) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty A = list(map(str.strip , map(' '.join , itertools.product(*snake_case__ ) ) ) ) A = max(len(snake_case__ ) for x in variations ) # split wanted keys A = args.report_metric_keys.split() # capture prints into a log file for convenience A = F'benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt' print(F'\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt' ) print(F'and this script\'s output is also piped into {report_fn}' ) A = Tee(snake_case__ ) print(F'\n*** Running {len(snake_case__ )} benchmarks:' ) print(F'Base command: {" ".join(snake_case__ )}' ) A = 'variation' A = [] for id, variation in enumerate(tqdm(snake_case__ , desc='Total completion: ' , leave=snake_case__ ) ): A = base_cmd + variation.split() results.append( process_run( id + 1 , snake_case__ , snake_case__ , snake_case__ , snake_case__ , args.target_metric_key , snake_case__ , args.repeat_times , snake_case__ , args.verbose , ) ) process_results(snake_case__ , args.target_metric_key , snake_case__ , args.base_variation , snake_case__ ) if __name__ == "__main__": main()
91
0
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() _lowercase = logging.get_logger(__name__) def A (__lowerCamelCase :List[Any] ): _lowerCAmelCase = DPTConfig(embedding_type="""hybrid""" ) if "large" in checkpoint_url: _lowerCAmelCase = 1024 _lowerCAmelCase = 4096 _lowerCAmelCase = 24 _lowerCAmelCase = 16 _lowerCAmelCase = [5, 11, 17, 23] _lowerCAmelCase = [256, 512, 1024, 1024] _lowerCAmelCase = (1, 384, 384) if "nyu" or "midas" in checkpoint_url: _lowerCAmelCase = 768 _lowerCAmelCase = [1, 1, 1, 0.5] _lowerCAmelCase = [256, 512, 768, 768] _lowerCAmelCase = 150 _lowerCAmelCase = 16 _lowerCAmelCase = (1, 384, 384) _lowerCAmelCase = False _lowerCAmelCase = """project""" if "ade" in checkpoint_url: _lowerCAmelCase = True _lowerCAmelCase = 768 _lowerCAmelCase = [1, 1, 1, 0.5] _lowerCAmelCase = 150 _lowerCAmelCase = 16 _lowerCAmelCase = """huggingface/label-files""" _lowerCAmelCase = """ade20k-id2label.json""" _lowerCAmelCase = json.load(open(cached_download(hf_hub_url(__lowerCamelCase , __lowerCamelCase , repo_type="""dataset""" ) ) , """r""" ) ) _lowerCAmelCase = {int(__lowerCamelCase ): v for k, v in idalabel.items()} _lowerCAmelCase = idalabel _lowerCAmelCase = {v: k for k, v in idalabel.items()} _lowerCAmelCase = [1, 150, 480, 480] return config, expected_shape def A (__lowerCamelCase :str ): _lowerCAmelCase = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""] for k in ignore_keys: state_dict.pop(__lowerCamelCase , __lowerCamelCase ) def A (__lowerCamelCase :Union[str, Any] ): if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): _lowerCAmelCase = name.replace("""pretrained.model""" , """dpt.encoder""" ) if "pretrained.model" in name: _lowerCAmelCase = name.replace("""pretrained.model""" , """dpt.embeddings""" ) if "patch_embed" in name: _lowerCAmelCase = name.replace("""patch_embed""" , """""" ) if "pos_embed" in name: _lowerCAmelCase = name.replace("""pos_embed""" , """position_embeddings""" ) if "attn.proj" in name: _lowerCAmelCase = name.replace("""attn.proj""" , """attention.output.dense""" ) if "proj" in name and "project" not in name: _lowerCAmelCase = name.replace("""proj""" , """projection""" ) if "blocks" in name: _lowerCAmelCase = name.replace("""blocks""" , """layer""" ) if "mlp.fc1" in name: _lowerCAmelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: _lowerCAmelCase = name.replace("""mlp.fc2""" , """output.dense""" ) if "norm1" in name and "backbone" not in name: _lowerCAmelCase = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name and "backbone" not in name: _lowerCAmelCase = name.replace("""norm2""" , """layernorm_after""" ) if "scratch.output_conv" in name: _lowerCAmelCase = name.replace("""scratch.output_conv""" , """head""" ) if "scratch" in name: _lowerCAmelCase = name.replace("""scratch""" , """neck""" ) if "layer1_rn" in name: _lowerCAmelCase = name.replace("""layer1_rn""" , """convs.0""" ) if "layer2_rn" in name: _lowerCAmelCase = name.replace("""layer2_rn""" , """convs.1""" ) if "layer3_rn" in name: _lowerCAmelCase = name.replace("""layer3_rn""" , """convs.2""" ) if "layer4_rn" in name: _lowerCAmelCase = name.replace("""layer4_rn""" , """convs.3""" ) if "refinenet" in name: _lowerCAmelCase = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 _lowerCAmelCase = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4 )}' ) if "out_conv" in name: _lowerCAmelCase = name.replace("""out_conv""" , """projection""" ) if "resConfUnit1" in name: _lowerCAmelCase = name.replace("""resConfUnit1""" , """residual_layer1""" ) if "resConfUnit2" in name: _lowerCAmelCase = name.replace("""resConfUnit2""" , """residual_layer2""" ) if "conv1" in name: _lowerCAmelCase = name.replace("""conv1""" , """convolution1""" ) if "conv2" in name: _lowerCAmelCase = name.replace("""conv2""" , """convolution2""" ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: _lowerCAmelCase = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" ) if "pretrained.act_postprocess2.0.project.0" in name: _lowerCAmelCase = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" ) if "pretrained.act_postprocess3.0.project.0" in name: _lowerCAmelCase = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" ) if "pretrained.act_postprocess4.0.project.0" in name: _lowerCAmelCase = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" ) # resize blocks if "pretrained.act_postprocess1.3" in name: _lowerCAmelCase = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" ) if "pretrained.act_postprocess1.4" in name: _lowerCAmelCase = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" ) if "pretrained.act_postprocess2.3" in name: _lowerCAmelCase = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" ) if "pretrained.act_postprocess2.4" in name: _lowerCAmelCase = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" ) if "pretrained.act_postprocess3.3" in name: _lowerCAmelCase = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" ) if "pretrained.act_postprocess4.3" in name: _lowerCAmelCase = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" ) if "pretrained.act_postprocess4.4" in name: _lowerCAmelCase = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" ) if "pretrained" in name: _lowerCAmelCase = name.replace("""pretrained""" , """dpt""" ) if "bn" in name: _lowerCAmelCase = name.replace("""bn""" , """batch_norm""" ) if "head" in name: _lowerCAmelCase = name.replace("""head""" , """head.head""" ) if "encoder.norm" in name: _lowerCAmelCase = name.replace("""encoder.norm""" , """layernorm""" ) if "auxlayer" in name: _lowerCAmelCase = name.replace("""auxlayer""" , """auxiliary_head.head""" ) if "backbone" in name: _lowerCAmelCase = name.replace("""backbone""" , """backbone.bit.encoder""" ) if ".." in name: _lowerCAmelCase = name.replace("""..""" , """.""" ) if "stem.conv" in name: _lowerCAmelCase = name.replace("""stem.conv""" , """bit.embedder.convolution""" ) if "blocks" in name: _lowerCAmelCase = name.replace("""blocks""" , """layers""" ) if "convolution" in name and "backbone" in name: _lowerCAmelCase = name.replace("""convolution""" , """conv""" ) if "layer" in name and "backbone" in name: _lowerCAmelCase = name.replace("""layer""" , """layers""" ) if "backbone.bit.encoder.bit" in name: _lowerCAmelCase = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""" ) if "embedder.conv" in name: _lowerCAmelCase = name.replace("""embedder.conv""" , """embedder.convolution""" ) if "backbone.bit.encoder.stem.norm" in name: _lowerCAmelCase = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""" ) return name def A (__lowerCamelCase :str , __lowerCamelCase :Any ): for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _lowerCAmelCase = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight' ) _lowerCAmelCase = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict _lowerCAmelCase = in_proj_weight[: config.hidden_size, :] _lowerCAmelCase = in_proj_bias[: config.hidden_size] _lowerCAmelCase = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _lowerCAmelCase = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _lowerCAmelCase = in_proj_weight[ -config.hidden_size :, : ] _lowerCAmelCase = in_proj_bias[-config.hidden_size :] def A (): _lowerCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" _lowerCAmelCase = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw ) return im @torch.no_grad() def A (__lowerCamelCase :List[str] , __lowerCamelCase :int , __lowerCamelCase :Any , __lowerCamelCase :str , __lowerCamelCase :List[str] ): _lowerCAmelCase , _lowerCAmelCase = get_dpt_config(__lowerCamelCase ) # load original state_dict from URL # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu") _lowerCAmelCase = torch.load(__lowerCamelCase , map_location="""cpu""" ) # remove certain keys remove_ignore_keys_(__lowerCamelCase ) # rename keys for key in state_dict.copy().keys(): _lowerCAmelCase = state_dict.pop(__lowerCamelCase ) _lowerCAmelCase = val # read in qkv matrices read_in_q_k_v(__lowerCamelCase , __lowerCamelCase ) # load HuggingFace model _lowerCAmelCase = DPTForSemanticSegmentation(__lowerCamelCase ) if """ade""" in checkpoint_url else DPTForDepthEstimation(__lowerCamelCase ) model.load_state_dict(__lowerCamelCase ) model.eval() # Check outputs on an image _lowerCAmelCase = 480 if """ade""" in checkpoint_url else 384 _lowerCAmelCase = DPTImageProcessor(size=__lowerCamelCase ) _lowerCAmelCase = prepare_img() _lowerCAmelCase = image_processor(__lowerCamelCase , return_tensors="""pt""" ) # forward pass _lowerCAmelCase = model(**__lowerCamelCase ).logits if """ade""" in checkpoint_url else model(**__lowerCamelCase ).predicted_depth if show_prediction: _lowerCAmelCase = ( torch.nn.functional.interpolate( outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=__lowerCamelCase , ) .squeeze() .cpu() .numpy() ) Image.fromarray((prediction / prediction.max()) * 255 ).show() if pytorch_dump_folder_path is not None: Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase ) print(f'Saving model to {pytorch_dump_folder_path}' ) model.save_pretrained(__lowerCamelCase ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(__lowerCamelCase ) if push_to_hub: model.push_to_hub("""ybelkada/dpt-hybrid-midas""" ) image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""", type=str, help="""URL of the original DPT checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=False, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", action="""store_true""", ) parser.add_argument( """--model_name""", default="""dpt-large""", type=str, help="""Name of the model, in case you're pushing to the hub.""", ) parser.add_argument( """--show_prediction""", action="""store_true""", ) _lowercase = parser.parse_args() convert_dpt_checkpoint( args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction )
5
"""simple docstring""" import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version('''>=''', FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType _lowercase = get_logger(__name__) def _snake_case ( snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : int , snake_case__ : str=0 ): os.makedirs(snake_case__ , exist_ok=snake_case__ ) with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): A = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: A = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin' A = os.path.join(snake_case__ , snake_case__ ) if accelerator.process_index == 0: logger.info(F'Saving model to {output_model_file}' ) torch.save(snake_case__ , snake_case__ ) logger.info(F'Model saved to {output_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: A = ( F'{MODEL_NAME}_rank{accelerator.process_index}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Saving model to {output_model_file}' ) torch.save(snake_case__ , snake_case__ ) logger.info(F'Model saved to {output_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: A = os.path.join(snake_case__ , F'{MODEL_NAME}_{model_index}' ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) logger.info(F'Saving model to {ckpt_dir}' ) A = {'model': state_dict} dist_cp.save_state_dict( state_dict=snake_case__ , storage_writer=dist_cp.FileSystemWriter(snake_case__ ) , planner=DefaultSavePlanner() , ) logger.info(F'Model saved to {ckpt_dir}' ) def _snake_case ( snake_case__ : int , snake_case__ : List[str] , snake_case__ : str , snake_case__ : str , snake_case__ : Any=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(snake_case__ ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( 'Set the `sync_module_states` flag to `True` so that model states are synced across processes when ' 'initializing FSDP object' ) return A = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin' A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Loading model from {input_model_file}' ) A = torch.load(snake_case__ ) logger.info(F'Model loaded from {input_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: A = ( F'{MODEL_NAME}_rank{accelerator.process_index}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Loading model from {input_model_file}' ) A = torch.load(snake_case__ ) logger.info(F'Model loaded from {input_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: A = ( os.path.join(snake_case__ , F'{MODEL_NAME}_{model_index}' ) if F'{MODEL_NAME}' not in input_dir else input_dir ) logger.info(F'Loading model from {ckpt_dir}' ) A = {'model': model.state_dict()} dist_cp.load_state_dict( state_dict=snake_case__ , storage_reader=dist_cp.FileSystemReader(snake_case__ ) , planner=DefaultLoadPlanner() , ) A = state_dict['model'] logger.info(F'Model loaded from {ckpt_dir}' ) model.load_state_dict(snake_case__ ) def _snake_case ( snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : Any=0 ): os.makedirs(snake_case__ , exist_ok=snake_case__ ) with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): A = FSDP.optim_state_dict(snake_case__ , snake_case__ ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: A = ( F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Saving Optimizer state to {output_optimizer_file}' ) torch.save(snake_case__ , snake_case__ ) logger.info(F'Optimizer state saved in {output_optimizer_file}' ) else: A = os.path.join(snake_case__ , F'{OPTIMIZER_NAME}_{optimizer_index}' ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) logger.info(F'Saving Optimizer state to {ckpt_dir}' ) dist_cp.save_state_dict( state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(snake_case__ ) , planner=DefaultSavePlanner() , ) logger.info(F'Optimizer state saved in {ckpt_dir}' ) def _snake_case ( snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Optional[int]=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: A = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: A = ( F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Loading Optimizer state from {input_optimizer_file}' ) A = torch.load(snake_case__ ) logger.info(F'Optimizer state loaded from {input_optimizer_file}' ) else: A = ( os.path.join(snake_case__ , F'{OPTIMIZER_NAME}_{optimizer_index}' ) if F'{OPTIMIZER_NAME}' not in input_dir else input_dir ) logger.info(F'Loading Optimizer from {ckpt_dir}' ) A = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(snake_case__ ) , ) A = optim_state['optimizer'] logger.info(F'Optimizer loaded from {ckpt_dir}' ) A = FSDP.optim_state_dict_to_load(snake_case__ , snake_case__ , snake_case__ ) optimizer.load_state_dict(snake_case__ )
91
0
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: list ): SCREAMING_SNAKE_CASE__ = len(UpperCamelCase__ ) for i in range(1 , UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ = collection[i] SCREAMING_SNAKE_CASE__ = 0 SCREAMING_SNAKE_CASE__ = i - 1 while low <= high: SCREAMING_SNAKE_CASE__ = (low + high) // 2 if val < collection[mid]: SCREAMING_SNAKE_CASE__ = mid - 1 else: SCREAMING_SNAKE_CASE__ = mid + 1 for j in range(UpperCamelCase__ , UpperCamelCase__ , -1 ): SCREAMING_SNAKE_CASE__ = collection[j - 1] SCREAMING_SNAKE_CASE__ = val return collection if __name__ == "__main__": _lowerCamelCase = input('Enter numbers separated by a comma:\n').strip() _lowerCamelCase = [int(item) for item in user_input.split(',')] print(binary_insertion_sort(unsorted))
6
"""simple docstring""" import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechTaHifiGan, SpeechTaHifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: str = AudioLDMPipeline _lowerCamelCase: Optional[int] = TEXT_TO_AUDIO_PARAMS _lowerCamelCase: Optional[int] = TEXT_TO_AUDIO_BATCH_PARAMS _lowerCamelCase: Optional[int] = frozenset( [ '''num_inference_steps''', '''num_waveforms_per_prompt''', '''generator''', '''latents''', '''output_type''', '''return_dict''', '''callback''', '''callback_steps''', ] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: torch.manual_seed(0 ) A = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=(32, 64) ,class_embed_type='simple_projection' ,projection_class_embeddings_input_dim=32 ,class_embeddings_concat=A_ ,) A = DDIMScheduler( beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule='scaled_linear' ,clip_sample=A_ ,set_alpha_to_one=A_ ,) torch.manual_seed(0 ) A = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=1 ,out_channels=1 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,) torch.manual_seed(0 ) A = ClapTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,projection_dim=32 ,) A = ClapTextModelWithProjection(A_ ) A = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' ,model_max_length=77 ) A = SpeechTaHifiGanConfig( model_in_dim=8 ,sampling_rate=1_6000 ,upsample_initial_channel=16 ,upsample_rates=[2, 2] ,upsample_kernel_sizes=[4, 4] ,resblock_kernel_sizes=[3, 7] ,resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] ,normalize_before=A_ ,) A = SpeechTaHifiGan(A_ ) A = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'vocoder': vocoder, } return components def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Any ,A_ : Dict=0 ) -> str: if str(A_ ).startswith('mps' ): A = torch.manual_seed(A_ ) else: A = torch.Generator(device=A_ ).manual_seed(A_ ) A = { 'prompt': 'A hammer hitting a wooden surface', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, } return inputs def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = audioldm_pipe(**A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) == 256 A = audio[:10] A = np.array( [-0.00_50, 0.00_50, -0.00_60, 0.00_33, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_33] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = 3 * [inputs['prompt']] # forward A = audioldm_pipe(**A_ ) A = output.audios[0] A = self.get_dummy_inputs(A_ ) A = 3 * [inputs.pop('prompt' )] A = audioldm_pipe.tokenizer( A_ ,padding='max_length' ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=A_ ,return_tensors='pt' ,) A = text_inputs['input_ids'].to(A_ ) A = audioldm_pipe.text_encoder( A_ ,) A = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state A = F.normalize(A_ ,dim=-1 ) A = prompt_embeds # forward A = audioldm_pipe(**A_ ) A = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any: A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = 3 * ['this is a negative prompt'] A = negative_prompt A = 3 * [inputs['prompt']] # forward A = audioldm_pipe(**A_ ) A = output.audios[0] A = self.get_dummy_inputs(A_ ) A = 3 * [inputs.pop('prompt' )] A = [] for p in [prompt, negative_prompt]: A = audioldm_pipe.tokenizer( A_ ,padding='max_length' ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=A_ ,return_tensors='pt' ,) A = text_inputs['input_ids'].to(A_ ) A = audioldm_pipe.text_encoder( A_ ,) A = text_embeds.text_embeds # additional L_2 normalization over each hidden-state A = F.normalize(A_ ,dim=-1 ) embeds.append(A_ ) A , A = embeds # forward A = audioldm_pipe(**A_ ) A = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : str ) -> int: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = PNDMScheduler(skip_prk_steps=A_ ) A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = 'egg cracking' A = audioldm_pipe(**A_ ,negative_prompt=A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) == 256 A = audio[:10] A = np.array( [-0.00_51, 0.00_50, -0.00_60, 0.00_34, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_32] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = PNDMScheduler(skip_prk_steps=A_ ) A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = 'A hammer hitting a wooden surface' # test num_waveforms_per_prompt=1 (default) A = audioldm_pipe(A_ ,num_inference_steps=2 ).audios assert audios.shape == (1, 256) # test num_waveforms_per_prompt=1 (default) for batch of prompts A = 2 A = audioldm_pipe([prompt] * batch_size ,num_inference_steps=2 ).audios assert audios.shape == (batch_size, 256) # test num_waveforms_per_prompt for single prompt A = 2 A = audioldm_pipe(A_ ,num_inference_steps=2 ,num_waveforms_per_prompt=A_ ).audios assert audios.shape == (num_waveforms_per_prompt, 256) # test num_waveforms_per_prompt for batch of prompts A = 2 A = audioldm_pipe( [prompt] * batch_size ,num_inference_steps=2 ,num_waveforms_per_prompt=A_ ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = audioldm_pipe.vocoder.config.sampling_rate A = self.get_dummy_inputs(A_ ) A = audioldm_pipe(audio_length_in_s=0.0_16 ,**A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) / vocoder_sampling_rate == 0.0_16 A = audioldm_pipe(audio_length_in_s=0.0_32 ,**A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) / vocoder_sampling_rate == 0.0_32 def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]: A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = ['hey'] A = audioldm_pipe(A_ ,num_inference_steps=1 ) A = output.audios.shape assert audio_shape == (1, 256) A = audioldm_pipe.vocoder.config config.model_in_dim *= 2 A = SpeechTaHifiGan(A_ ).to(A_ ) A = audioldm_pipe(A_ ,num_inference_steps=1 ) A = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 256) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: self._test_attention_slicing_forward_pass(test_mean_pixel_difference=A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: self._test_inference_batch_single_identical(test_mean_pixel_difference=A_ ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,) def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A_ ) @slow class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]: super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : List[Any] ,A_ : str="cpu" ,A_ : List[str]=torch.floataa ,A_ : str=0 ) -> List[Any]: A = torch.Generator(device=A_ ).manual_seed(A_ ) A = np.random.RandomState(A_ ).standard_normal((1, 8, 128, 16) ) A = torch.from_numpy(A_ ).to(device=A_ ,dtype=A_ ) A = { 'prompt': 'A hammer hitting a wooden surface', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 2.5, } return inputs def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: A = AudioLDMPipeline.from_pretrained('cvssp/audioldm' ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_inputs(A_ ) A = 25 A = audioldm_pipe(**A_ ).audios[0] assert audio.ndim == 1 assert len(A_ ) == 8_1920 A = audio[7_7230:7_7240] A = np.array( [-0.48_84, -0.46_07, 0.00_23, 0.50_07, 0.58_96, 0.51_51, 0.38_13, -0.02_08, -0.36_87, -0.43_15] ) A = np.abs(expected_slice - audio_slice ).max() assert max_diff < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]: A = AudioLDMPipeline.from_pretrained('cvssp/audioldm' ) A = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_inputs(A_ ) A = audioldm_pipe(**A_ ).audios[0] assert audio.ndim == 1 assert len(A_ ) == 8_1920 A = audio[2_7780:2_7790] A = np.array([-0.21_31, -0.08_73, -0.01_24, -0.01_89, 0.05_69, 0.13_73, 0.18_83, 0.28_86, 0.32_97, 0.22_12] ) A = np.abs(expected_slice - audio_slice ).max() assert max_diff < 3e-2
91
0
"""simple docstring""" from ..utils import is_flax_available, is_torch_available if is_torch_available(): from .autoencoder_kl import AutoencoderKL from .controlnet import ControlNetModel from .dual_transformer_ad import DualTransformeraDModel from .modeling_utils import ModelMixin from .prior_transformer import PriorTransformer from .ta_film_transformer import TaFilmDecoder from .transformer_ad import TransformeraDModel from .unet_ad import UNetaDModel from .unet_ad import UNetaDModel from .unet_ad_condition import UNetaDConditionModel from .unet_ad_condition import UNetaDConditionModel from .vq_model import VQModel if is_flax_available(): from .controlnet_flax import FlaxControlNetModel from .unet_ad_condition_flax import FlaxUNetaDConditionModel from .vae_flax import FlaxAutoencoderKL
7
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase = { '''configuration_xlm_roberta_xl''': [ '''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMRobertaXLConfig''', '''XLMRobertaXLOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XLMRobertaXLForCausalLM''', '''XLMRobertaXLForMaskedLM''', '''XLMRobertaXLForMultipleChoice''', '''XLMRobertaXLForQuestionAnswering''', '''XLMRobertaXLForSequenceClassification''', '''XLMRobertaXLForTokenClassification''', '''XLMRobertaXLModel''', '''XLMRobertaXLPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaXLConfig, XLMRobertaXLOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaXLForCausalLM, XLMRobertaXLForMaskedLM, XLMRobertaXLForMultipleChoice, XLMRobertaXLForQuestionAnswering, XLMRobertaXLForSequenceClassification, XLMRobertaXLForTokenClassification, XLMRobertaXLModel, XLMRobertaXLPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
91
0
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class SCREAMING_SNAKE_CASE (unittest.TestCase ): def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=3 , _UpperCAmelCase=30 , _UpperCAmelCase=400 , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=True , _UpperCAmelCase=1 / 255 , _UpperCAmelCase=True , ): '''simple docstring''' __A : int = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333} __A : Dict = parent __A : Union[str, Any] = batch_size __A : List[str] = num_channels __A : str = min_resolution __A : Optional[int] = max_resolution __A : Optional[int] = do_resize __A : Optional[int] = size __A : Any = do_normalize __A : Dict = image_mean __A : Optional[Any] = image_std __A : Optional[int] = do_rescale __A : List[Any] = rescale_factor __A : int = do_pad def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase=False): '''simple docstring''' if not batched: __A : List[Any] = image_inputs[0] if isinstance(_UpperCAmelCase , Image.Image): __A ,__A : Any = image.size else: __A ,__A : List[Any] = image.shape[1], image.shape[2] if w < h: __A : Union[str, Any] = int(self.size['shortest_edge'] * h / w) __A : Dict = self.size['shortest_edge'] elif w > h: __A : Any = self.size['shortest_edge'] __A : Optional[Any] = int(self.size['shortest_edge'] * w / h) else: __A : Union[str, Any] = self.size['shortest_edge'] __A : str = self.size['shortest_edge'] else: __A : Tuple = [] for image in image_inputs: __A ,__A : int = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) __A : Union[str, Any] = max(_UpperCAmelCase , key=lambda _UpperCAmelCase: item[0])[0] __A : int = max(_UpperCAmelCase , key=lambda _UpperCAmelCase: item[1])[1] return expected_height, expected_width @require_torch @require_vision class SCREAMING_SNAKE_CASE (a__ , unittest.TestCase ): lowerCAmelCase = DeformableDetrImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Union[str, Any] = DeformableDetrImageProcessingTester(self) @property def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Tuple = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(_UpperCAmelCase , 'image_mean')) self.assertTrue(hasattr(_UpperCAmelCase , 'image_std')) self.assertTrue(hasattr(_UpperCAmelCase , 'do_normalize')) self.assertTrue(hasattr(_UpperCAmelCase , 'do_resize')) self.assertTrue(hasattr(_UpperCAmelCase , 'do_rescale')) self.assertTrue(hasattr(_UpperCAmelCase , 'do_pad')) self.assertTrue(hasattr(_UpperCAmelCase , 'size')) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Tuple = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333}) self.assertEqual(image_processor.do_pad , _UpperCAmelCase) __A : int = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_UpperCAmelCase) self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84}) self.assertEqual(image_processor.do_pad , _UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : int = self.image_processing_class(**self.image_processor_dict) # create random PIL images __A : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , Image.Image) # Test not batched input __A : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values __A ,__A : Optional[int] = self.image_processor_tester.get_expected_values(_UpperCAmelCase) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __A ,__A : int = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase) __A : Dict = image_processing(_UpperCAmelCase , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Any = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors __A : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , np.ndarray) # Test not batched input __A : Any = image_processing(image_inputs[0] , return_tensors='pt').pixel_values __A ,__A : Union[str, Any] = self.image_processor_tester.get_expected_values(_UpperCAmelCase) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __A : Tuple = image_processing(_UpperCAmelCase , return_tensors='pt').pixel_values __A ,__A : Tuple = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Optional[int] = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , torch.Tensor) # Test not batched input __A : Dict = image_processing(image_inputs[0] , return_tensors='pt').pixel_values __A ,__A : Optional[int] = self.image_processor_tester.get_expected_values(_UpperCAmelCase) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __A : str = image_processing(_UpperCAmelCase , return_tensors='pt').pixel_values __A ,__A : Tuple = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r') as f: __A : Dict = json.loads(f.read()) __A : str = {'image_id': 3_9769, 'annotations': target} # encode them __A : List[Any] = DeformableDetrImageProcessor() __A : Optional[int] = image_processing(images=_UpperCAmelCase , annotations=_UpperCAmelCase , return_tensors='pt') # verify pixel values __A : List[str] = torch.Size([1, 3, 800, 1066]) self.assertEqual(encoding['pixel_values'].shape , _UpperCAmelCase) __A : Tuple = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _UpperCAmelCase , atol=1e-4)) # verify area __A : Tuple = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438]) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _UpperCAmelCase)) # verify boxes __A : Tuple = torch.Size([6, 4]) self.assertEqual(encoding['labels'][0]['boxes'].shape , _UpperCAmelCase) __A : Any = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215]) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _UpperCAmelCase , atol=1e-3)) # verify image_id __A : Tuple = torch.tensor([3_9769]) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _UpperCAmelCase)) # verify is_crowd __A : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _UpperCAmelCase)) # verify class_labels __A : Optional[Any] = torch.tensor([75, 75, 63, 65, 17, 17]) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _UpperCAmelCase)) # verify orig_size __A : Any = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _UpperCAmelCase)) # verify size __A : Dict = torch.tensor([800, 1066]) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _UpperCAmelCase)) @slow def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r') as f: __A : Optional[int] = json.loads(f.read()) __A : int = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target} __A : Tuple = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic') # encode them __A : str = DeformableDetrImageProcessor(format='coco_panoptic') __A : Any = image_processing(images=_UpperCAmelCase , annotations=_UpperCAmelCase , masks_path=_UpperCAmelCase , return_tensors='pt') # verify pixel values __A : str = torch.Size([1, 3, 800, 1066]) self.assertEqual(encoding['pixel_values'].shape , _UpperCAmelCase) __A : List[str] = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _UpperCAmelCase , atol=1e-4)) # verify area __A : Any = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147]) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _UpperCAmelCase)) # verify boxes __A : Optional[int] = torch.Size([6, 4]) self.assertEqual(encoding['labels'][0]['boxes'].shape , _UpperCAmelCase) __A : Optional[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625]) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _UpperCAmelCase , atol=1e-3)) # verify image_id __A : List[Any] = torch.tensor([3_9769]) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _UpperCAmelCase)) # verify is_crowd __A : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _UpperCAmelCase)) # verify class_labels __A : List[str] = torch.tensor([17, 17, 63, 75, 75, 93]) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _UpperCAmelCase)) # verify masks __A : Dict = 82_2873 self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _UpperCAmelCase) # verify orig_size __A : Optional[int] = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _UpperCAmelCase)) # verify size __A : int = torch.tensor([800, 1066]) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _UpperCAmelCase))
8
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.utils import ComputeEnvironment from .cluster import get_cluster_input from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 from .sagemaker import get_sagemaker_input _lowercase = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine''' def _snake_case ( ): A = _ask_options( 'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , ) if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: A = get_sagemaker_input() else: A = get_cluster_input() return config def _snake_case ( snake_case__ : Any=None ): if subparsers is not None: A = subparsers.add_parser('config' , description=snake_case__ ) else: A = argparse.ArgumentParser('Accelerate config command' , description=snake_case__ ) parser.add_argument( '--config_file' , default=snake_case__ , help=( 'The path to use to store the config file. Will default to a file named default_config.yaml in the cache ' 'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ' 'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ' 'with \'huggingface\'.' ) , ) if subparsers is not None: parser.set_defaults(func=snake_case__ ) return parser def _snake_case ( snake_case__ : Tuple ): A = get_user_input() if args.config_file is not None: A = args.config_file else: if not os.path.isdir(snake_case__ ): os.makedirs(snake_case__ ) A = default_yaml_config_file if config_file.endswith('.json' ): config.to_json_file(snake_case__ ) else: config.to_yaml_file(snake_case__ ) print(F'accelerate configuration saved at {config_file}' ) def _snake_case ( ): A = config_command_parser() A = parser.parse_args() config_command(snake_case__ ) if __name__ == "__main__": main()
91
0
import copy from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" A__ : int = ["input_features"] def __init__( self : Dict , _snake_case : str=80 , _snake_case : List[Any]=1_60_00 , _snake_case : str=1_60 , _snake_case : Any=30 , _snake_case : Dict=4_00 , _snake_case : int=0.0 , _snake_case : Tuple=False , **_snake_case : Optional[Any] , ): """simple docstring""" super().__init__( feature_size=_snake_case , sampling_rate=_snake_case , padding_value=_snake_case , return_attention_mask=_snake_case , **_snake_case , ) A__ = n_fft A__ = hop_length A__ = chunk_length A__ = chunk_length * sampling_rate A__ = self.n_samples // hop_length A__ = sampling_rate A__ = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_snake_case , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=_snake_case , norm='slaney' , mel_scale='slaney' , ) def _a ( self : List[str] , _snake_case : np.array ): """simple docstring""" A__ = spectrogram( _snake_case , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='log10' , ) A__ = log_spec[:, :-1] A__ = np.maximum(_snake_case , log_spec.max() - 8.0 ) A__ = (log_spec + 4.0) / 4.0 return log_spec @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def _a ( _snake_case : List[np.ndarray] , _snake_case : List[np.ndarray] , _snake_case : float = 0.0 ): """simple docstring""" if attention_mask is not None: A__ = np.array(_snake_case , np.intaa ) A__ = [] for vector, length in zip(_snake_case , attention_mask.sum(-1 ) ): A__ = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: A__ = padding_value normed_input_values.append(_snake_case ) else: A__ = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def __call__( self : str , _snake_case : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _snake_case : bool = True , _snake_case : Optional[int] = None , _snake_case : Optional[Union[str, TensorType]] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[str] = "max_length" , _snake_case : Optional[int] = None , _snake_case : Optional[int] = None , _snake_case : Optional[bool] = None , **_snake_case : Optional[int] , ): """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input''' F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) A__ = isinstance(_snake_case , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) A__ = is_batched_numpy or ( isinstance(_snake_case , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: A__ = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(_snake_case , np.ndarray ): A__ = np.asarray(_snake_case , dtype=np.floataa ) elif isinstance(_snake_case , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): A__ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: A__ = [np.asarray([raw_speech] ).T] A__ = BatchFeature({'input_features': raw_speech} ) # convert into correct format for padding A__ = self.pad( _snake_case , padding=_snake_case , max_length=max_length if max_length else self.n_samples , truncation=_snake_case , pad_to_multiple_of=_snake_case , return_attention_mask=return_attention_mask or do_normalize , ) # zero-mean and unit-variance normalization if do_normalize: A__ = self.zero_mean_unit_var_norm( padded_inputs['input_features'] , attention_mask=padded_inputs['attention_mask'] , padding_value=self.padding_value , ) A__ = np.stack(padded_inputs['input_features'] , axis=0 ) # make sure list is in array format A__ = padded_inputs.get('input_features' ).transpose(2 , 0 , 1 ) A__ = [self._np_extract_fbank_features(_snake_case ) for waveform in input_features[0]] if isinstance(input_features[0] , _snake_case ): A__ = [np.asarray(_snake_case , dtype=np.floataa ) for feature in input_features] else: A__ = input_features if return_attention_mask: # rescale from sample (48000) to feature (3000) A__ = padded_inputs['attention_mask'][:, :: self.hop_length] if return_tensors is not None: A__ = padded_inputs.convert_to_tensors(_snake_case ) return padded_inputs def _a ( self : Optional[Any] ): """simple docstring""" A__ = copy.deepcopy(self.__dict__ ) A__ = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] return output
9
"""simple docstring""" import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Tuple ,A_ : Any ,A_ : int=13 ,A_ : str=7 ,A_ : Tuple=True ,A_ : str=True ,A_ : str=False ,A_ : List[str]=True ,A_ : str=99 ,A_ : str=32 ,A_ : Optional[int]=5 ,A_ : Optional[Any]=4 ,A_ : str=37 ,A_ : Optional[Any]="gelu" ,A_ : Union[str, Any]=0.1 ,A_ : Any=0.1 ,A_ : Optional[Any]=512 ,A_ : str=16 ,A_ : int=2 ,A_ : Optional[Any]=0.02 ,A_ : str=3 ,A_ : str=4 ,A_ : List[str]=None ,) -> str: A = parent A = batch_size A = seq_length A = is_training A = use_input_mask A = use_token_type_ids A = use_labels A = vocab_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_vocab_size A = type_sequence_label_size A = initializer_range A = num_labels A = num_choices A = scope def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A = None if self.use_input_mask: A = random_attention_mask([self.batch_size, self.seq_length] ) A = None if self.use_token_type_ids: A = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) A = None A = None A = None if self.use_labels: A = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A = ids_tensor([self.batch_size] ,self.num_choices ) A = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: return LlamaConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=A_ ,initializer_range=self.initializer_range ,) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Dict ,A_ : Optional[int] ,A_ : Any ,A_ : Optional[Any] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Tuple ) -> List[Any]: A = LlamaModel(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ) A = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : int ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Any ,A_ : Tuple ,A_ : Union[str, Any] ,A_ : Dict ,) -> List[str]: A = True A = LlamaModel(A_ ) model.to(A_ ) model.eval() A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,) A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,) A = model(A_ ,attention_mask=A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : List[str] ,A_ : Optional[int] ,A_ : Any ,A_ : str ,A_ : Dict ,A_ : Dict ,A_ : Tuple ,A_ : Tuple ,A_ : Dict ,) -> Union[str, Any]: A = LlamaForCausalLM(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Dict ,A_ : Any ,A_ : int ,A_ : List[str] ,A_ : Tuple ,A_ : Any ,A_ : Union[str, Any] ,A_ : Any ,A_ : int ,) -> List[Any]: A = True A = True A = LlamaForCausalLM(config=A_ ) model.to(A_ ) model.eval() # first forward pass A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,use_cache=A_ ,) A = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A = ids_tensor((self.batch_size, 3) ,config.vocab_size ) A = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and A = torch.cat([input_ids, next_tokens] ,dim=-1 ) A = torch.cat([input_mask, next_mask] ,dim=-1 ) A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,output_hidden_states=A_ ,)['hidden_states'][0] A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,past_key_values=A_ ,output_hidden_states=A_ ,)['hidden_states'][0] # select random slice A = ids_tensor((1,) ,output_from_past.shape[-1] ).item() A = output_from_no_past[:, -3:, random_slice_idx].detach() A = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A_ ,A_ ,atol=1e-3 ) ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]: A = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = config_and_inputs A = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Dict = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () _lowerCamelCase: List[Any] = (LlamaForCausalLM,) if is_torch_available() else () _lowerCamelCase: Any = ( { '''feature-extraction''': LlamaModel, '''text-classification''': LlamaForSequenceClassification, '''text-generation''': LlamaForCausalLM, '''zero-shot''': LlamaForSequenceClassification, } if is_torch_available() else {} ) _lowerCamelCase: int = False _lowerCamelCase: List[str] = False def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: A = LlamaModelTester(self ) A = ConfigTester(self ,config_class=A_ ,hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]: A = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: A = type self.model_tester.create_and_check_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = 3 A = input_dict['input_ids'] A = input_ids.ne(1 ).to(A_ ) A = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) A = LlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = 3 A = 'single_label_classification' A = input_dict['input_ids'] A = input_ids.ne(1 ).to(A_ ) A = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) A = LlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = 3 A = 'multi_label_classification' A = input_dict['input_ids'] A = input_ids.ne(1 ).to(A_ ) A = ids_tensor( [self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float ) A = LlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('LLaMA buffers include complex numbers, which breaks this test' ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: pass @parameterized.expand([('linear',), ('dynamic',)] ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Any ) -> str: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = ids_tensor([1, 10] ,config.vocab_size ) A = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights A = LlamaModel(A_ ) original_model.to(A_ ) original_model.eval() A = original_model(A_ ).last_hidden_state A = original_model(A_ ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights A = {'type': scaling_type, 'factor': 10.0} A = LlamaModel(A_ ) scaled_model.to(A_ ) scaled_model.eval() A = scaled_model(A_ ).last_hidden_state A = scaled_model(A_ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(A_ ,A_ ,atol=1e-5 ) ) else: self.assertFalse(torch.allclose(A_ ,A_ ,atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(A_ ,A_ ,atol=1e-5 ) ) @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: A = [1, 306, 4658, 278, 6593, 310, 2834, 338] A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' ,device_map='auto' ) A = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 A = torch.tensor([[-6.65_50, -4.12_27, -4.98_59, -3.24_06, 0.82_62, -3.00_33, 1.29_64, -3.36_99]] ) torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A = torch.tensor([-12.82_81, -7.44_53, -0.46_39, -8.06_25, -7.25_00, -8.00_00, -6.48_83, -7.76_95, -7.84_38, -7.03_12, -6.21_88, -7.13_28, -1.84_96, 1.99_61, -8.62_50, -6.72_27, -12.82_81, -6.94_92, -7.07_42, -7.78_52, -7.58_20, -7.90_62, -6.93_75, -7.98_05, -8.34_38, -8.15_62, -8.04_69, -7.62_50, -7.74_22, -7.33_98,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,A_ ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def _SCREAMING_SNAKE_CASE ( self : Any ) -> int: A = [1, 306, 4658, 278, 6593, 310, 2834, 338] A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' ,device_map='auto' ) A = model(torch.tensor(A_ ) ) # Expected mean on dim = -1 A = torch.tensor([[-2.06_22, -1.27_94, -1.16_38, -0.97_88, -1.46_03, -1.02_38, -1.78_93, -1.44_11]] ) torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A = torch.tensor([-8.14_06, -8.05_47, 2.74_61, -1.23_44, -0.14_48, -1.82_62, -1.00_20, -1.81_54, -1.68_95, -1.85_16, -2.35_74, -0.92_77, 3.75_98, 6.57_42, -1.29_98, -0.11_77, -8.14_06, -2.96_88, -2.91_99, -3.16_99, -3.52_54, -2.35_55, -2.79_88, -3.41_41, -2.82_62, -4.51_95, -3.33_79, -3.31_64, -2.78_32, -3.02_73] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,A_ ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def _SCREAMING_SNAKE_CASE ( self : int ) -> str: A = [1, 306, 4658, 278, 6593, 310, 2834, 338] A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ,device_map='auto' ) A = model(torch.tensor(A_ ) ) # Expected mean on dim = -1 A = torch.tensor([[-0.85_62, -1.85_20, -0.75_51, -0.41_62, -1.51_61, -1.20_38, -2.48_23, -2.32_54]] ) torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A = torch.tensor([-2.22_27, 4.88_28, 0.90_23, -0.45_78, -0.78_71, -0.10_33, -0.62_21, -0.57_86, -0.78_03, -1.06_74, -1.29_20, -0.15_70, 0.80_08, 2.07_23, -0.94_97, 0.27_71, -2.22_27, -0.76_12, -1.43_46, -1.20_61, -1.64_26, -0.30_00, -0.71_39, -1.19_34, -1.86_91, -1.69_73, -1.59_47, -1.27_05, -0.35_23, -0.55_13] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) @unittest.skip( 'Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test' ) @slow def _SCREAMING_SNAKE_CASE ( self : Any ) -> str: A = [1, 306, 4658, 278, 6593, 310, 2834, 338] A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' ,device_map='auto' ) A = model(torch.tensor(A_ ) ) A = torch.tensor( [[-4.23_27, -3.33_60, -4.66_65, -4.76_31, -1.81_80, -3.41_70, -1.42_11, -3.18_10]] ,dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) # fmt: off A = torch.tensor([-9.49_22, -3.95_51, 1.79_98, -5.67_58, -5.10_55, -5.89_84, -4.83_20, -6.80_86, -6.53_91, -5.61_72, -5.58_20, -5.53_52, 1.78_81, 3.62_89, -6.51_17, -3.47_85, -9.50_00, -6.03_52, -6.81_25, -6.01_95, -6.68_36, -5.47_27, -6.28_12, -6.03_91, -7.33_98, -7.42_97, -7.48_44, -6.58_20, -5.87_89, -5.53_12] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,A_ ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip('Model is curently gated' ) @slow def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple: A = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi' A = 'Simply put, the theory of relativity states that ' A = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ) A = tokenizer.encode(A_ ,return_tensors='pt' ) A = LlamaForCausalLM.from_pretrained( 'meta-llama/Llama-2-13b-chat-hf' ,device_map='sequential' ,use_safetensors=A_ ) # greedy generation outputs A = model.generate(A_ ,max_new_tokens=64 ,top_p=A_ ,temperature=1 ,do_sample=A_ ) A = tokenizer.decode(generated_ids[0] ,skip_special_tokens=A_ ) self.assertEqual(A_ ,A_ )
91
0
def _snake_case ( __snake_case ): _UpperCamelCase = [0] * len(__snake_case ) for i in range(1 , len(__snake_case ) ): # use last results for better performance - dynamic programming _UpperCamelCase = prefix_result[i - 1] while j > 0 and input_string[i] != input_string[j]: _UpperCamelCase = prefix_result[j - 1] if input_string[i] == input_string[j]: j += 1 _UpperCamelCase = j return prefix_result def _snake_case ( __snake_case ): return max(prefix_function(__snake_case ) ) if __name__ == "__main__": import doctest doctest.testmod()
10
"""simple docstring""" import os # Precomputes a list of the 100 first triangular numbers _lowercase = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)] def _snake_case ( ): A = os.path.dirname(os.path.realpath(snake_case__ ) ) A = os.path.join(snake_case__ , 'words.txt' ) A = '' with open(snake_case__ ) as f: A = f.readline() A = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )] A = [ word for word in [sum(ord(snake_case__ ) - 64 for x in word ) for word in words] if word in TRIANGULAR_NUMBERS ] return len(snake_case__ ) if __name__ == "__main__": print(solution())
91
0
'''simple docstring''' import warnings from ..trainer import Trainer from ..utils import logging lowercase_ = logging.get_logger(__name__) class __A ( A ): '''simple docstring''' def __init__(self , A=None , **A ) -> str: """simple docstring""" warnings.warn( '''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` ''' '''instead.''' , A , ) super().__init__(args=A , **A )
11
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''', '''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = '''mobilenet_v1''' def __init__( self : Optional[int] ,A_ : Optional[int]=3 ,A_ : Any=224 ,A_ : List[Any]=1.0 ,A_ : Union[str, Any]=8 ,A_ : Union[str, Any]="relu6" ,A_ : Optional[Any]=True ,A_ : List[str]=0.9_99 ,A_ : int=0.02 ,A_ : int=0.0_01 ,**A_ : Union[str, Any] ,) -> Dict: super().__init__(**A_ ) if depth_multiplier <= 0: raise ValueError('depth_multiplier must be greater than zero.' ) A = num_channels A = image_size A = depth_multiplier A = min_depth A = hidden_act A = tf_padding A = classifier_dropout_prob A = initializer_range A = layer_norm_eps class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[int] = version.parse('''1.11''' ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict([('pixel_values', {0: 'batch'})] ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: if self.task == "image-classification": return OrderedDict([('logits', {0: 'batch'})] ) else: return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> float: return 1e-4
91
0
from __future__ import annotations import time lowerCamelCase__ : List[str] = list[tuple[int, int]] lowerCamelCase__ : str = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] lowerCamelCase__ : List[Any] = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right class _snake_case : def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Optional[int] = pos_x lowercase__ : Optional[int] = pos_y lowercase__ : List[Any] = (pos_y, pos_x) lowercase__ : Optional[Any] = goal_x lowercase__ : Union[str, Any] = goal_y lowercase__ : Any = parent class _snake_case : def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : int = Node(start[1] , start[0] , goal[1] , goal[0] , SCREAMING_SNAKE_CASE_) lowercase__ : int = Node(goal[1] , goal[0] , goal[1] , goal[0] , SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = [self.start] lowercase__ : int = False def lowercase__ ( self): '''simple docstring''' while self.node_queue: lowercase__ : List[Any] = self.node_queue.pop(0) if current_node.pos == self.target.pos: lowercase__ : Union[str, Any] = True return self.retrace_path(SCREAMING_SNAKE_CASE_) lowercase__ : str = self.get_successors(SCREAMING_SNAKE_CASE_) for node in successors: self.node_queue.append(SCREAMING_SNAKE_CASE_) if not self.reached: return [self.start.pos] return None def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Optional[int] = [] for action in delta: lowercase__ : Optional[Any] = parent.pos_x + action[1] lowercase__ : Dict = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(SCREAMING_SNAKE_CASE_) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.target.pos_y , self.target.pos_x , SCREAMING_SNAKE_CASE_)) return successors def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : List[str] = node lowercase__ : Tuple = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x)) lowercase__ : Tuple = current_node.parent path.reverse() return path class _snake_case : def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Optional[int] = BreadthFirstSearch(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : Dict = BreadthFirstSearch(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = False def lowercase__ ( self): '''simple docstring''' while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue: lowercase__ : Optional[int] = self.fwd_bfs.node_queue.pop(0) lowercase__ : Optional[Any] = self.bwd_bfs.node_queue.pop(0) if current_bwd_node.pos == current_fwd_node.pos: lowercase__ : Optional[Any] = True return self.retrace_bidirectional_path( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : str = current_bwd_node lowercase__ : str = current_fwd_node lowercase__ : Tuple = { self.fwd_bfs: self.fwd_bfs.get_successors(SCREAMING_SNAKE_CASE_), self.bwd_bfs: self.bwd_bfs.get_successors(SCREAMING_SNAKE_CASE_), } for bfs in [self.fwd_bfs, self.bwd_bfs]: for node in successors[bfs]: bfs.node_queue.append(SCREAMING_SNAKE_CASE_) if not self.reached: return [self.fwd_bfs.start.pos] return None def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : List[Any] = self.fwd_bfs.retrace_path(SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = self.bwd_bfs.retrace_path(SCREAMING_SNAKE_CASE_) bwd_path.pop() bwd_path.reverse() lowercase__ : Optional[Any] = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] import doctest doctest.testmod() lowerCamelCase__ : Optional[Any] = (0, 0) lowerCamelCase__ : Tuple = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) lowerCamelCase__ : Tuple = time.time() lowerCamelCase__ : Union[str, Any] = BreadthFirstSearch(init, goal) lowerCamelCase__ : List[Any] = bfs.search() lowerCamelCase__ : str = time.time() - start_bfs_time print("""Unidirectional BFS computation time : """, bfs_time) lowerCamelCase__ : Tuple = time.time() lowerCamelCase__ : Union[str, Any] = BidirectionalBreadthFirstSearch(init, goal) lowerCamelCase__ : Union[str, Any] = bd_bfs.search() lowerCamelCase__ : Optional[Any] = time.time() - start_bd_bfs_time print("""Bidirectional BFS computation time : """, bd_bfs_time)
12
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowercase = { '''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''], '''tokenization_electra''': ['''ElectraTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''ElectraTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ElectraForCausalLM''', '''ElectraForMaskedLM''', '''ElectraForMultipleChoice''', '''ElectraForPreTraining''', '''ElectraForQuestionAnswering''', '''ElectraForSequenceClassification''', '''ElectraForTokenClassification''', '''ElectraModel''', '''ElectraPreTrainedModel''', '''load_tf_weights_in_electra''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFElectraForMaskedLM''', '''TFElectraForMultipleChoice''', '''TFElectraForPreTraining''', '''TFElectraForQuestionAnswering''', '''TFElectraForSequenceClassification''', '''TFElectraForTokenClassification''', '''TFElectraModel''', '''TFElectraPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''FlaxElectraForCausalLM''', '''FlaxElectraForMaskedLM''', '''FlaxElectraForMultipleChoice''', '''FlaxElectraForPreTraining''', '''FlaxElectraForQuestionAnswering''', '''FlaxElectraForSequenceClassification''', '''FlaxElectraForTokenClassification''', '''FlaxElectraModel''', '''FlaxElectraPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
91
0
'''simple docstring''' def UpperCAmelCase__ ( UpperCAmelCase_ : int ) -> bool: if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): raise ValueError('check_bouncy() accepts only integer arguments' ) __lowerCamelCase : Optional[int] = str(UpperCAmelCase_ ) __lowerCamelCase : List[str] = ''.join(sorted(UpperCAmelCase_ ) ) return sorted_str_n != str_n and sorted_str_n[::-1] != str_n def UpperCAmelCase__ ( UpperCAmelCase_ : float = 99 ) -> int: if not 0 < percent < 1_00: raise ValueError('solution() only accepts values from 0 to 100' ) __lowerCamelCase : Tuple = 0 __lowerCamelCase : int = 1 while True: if check_bouncy(UpperCAmelCase_ ): bouncy_num += 1 if (bouncy_num / num) * 1_00 >= percent: return num num += 1 if __name__ == "__main__": from doctest import testmod testmod() print(f'''{solution(99)}''')
13
"""simple docstring""" import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal _lowercase = datasets.utils.logging.get_logger(__name__) _lowercase = ['''names''', '''prefix'''] _lowercase = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols'''] _lowercase = ['''encoding_errors''', '''on_bad_lines'''] _lowercase = ['''date_format'''] @dataclass class lowerCAmelCase_ ( datasets.BuilderConfig ): '''simple docstring''' _lowerCamelCase: str = "," _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[Union[int, List[int], str]] = "infer" _lowerCamelCase: Optional[List[str]] = None _lowerCamelCase: Optional[List[str]] = None _lowerCamelCase: Optional[Union[int, str, List[int], List[str]]] = None _lowerCamelCase: Optional[Union[List[int], List[str]]] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: bool = True _lowerCamelCase: Optional[Literal["c", "python", "pyarrow"]] = None _lowerCamelCase: Dict[Union[int, str], Callable[[Any], Any]] = None _lowerCamelCase: Optional[list] = None _lowerCamelCase: Optional[list] = None _lowerCamelCase: bool = False _lowerCamelCase: Optional[Union[int, List[int]]] = None _lowerCamelCase: Optional[int] = None _lowerCamelCase: Optional[Union[str, List[str]]] = None _lowerCamelCase: bool = True _lowerCamelCase: bool = True _lowerCamelCase: bool = False _lowerCamelCase: bool = True _lowerCamelCase: Optional[str] = None _lowerCamelCase: str = "." _lowerCamelCase: Optional[str] = None _lowerCamelCase: str = '"' _lowerCamelCase: int = 0 _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: bool = True _lowerCamelCase: bool = True _lowerCamelCase: int = 0 _lowerCamelCase: bool = True _lowerCamelCase: bool = False _lowerCamelCase: Optional[str] = None _lowerCamelCase: int = 10000 _lowerCamelCase: Optional[datasets.Features] = None _lowerCamelCase: Optional[str] = "strict" _lowerCamelCase: Literal["error", "warn", "skip"] = "error" _lowerCamelCase: Optional[str] = None def _SCREAMING_SNAKE_CASE ( self : str ) -> Any: if self.delimiter is not None: A = self.delimiter if self.column_names is not None: A = self.column_names @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: A = { 'sep': self.sep, 'header': self.header, 'names': self.names, 'index_col': self.index_col, 'usecols': self.usecols, 'prefix': self.prefix, 'mangle_dupe_cols': self.mangle_dupe_cols, 'engine': self.engine, 'converters': self.converters, 'true_values': self.true_values, 'false_values': self.false_values, 'skipinitialspace': self.skipinitialspace, 'skiprows': self.skiprows, 'nrows': self.nrows, 'na_values': self.na_values, 'keep_default_na': self.keep_default_na, 'na_filter': self.na_filter, 'verbose': self.verbose, 'skip_blank_lines': self.skip_blank_lines, 'thousands': self.thousands, 'decimal': self.decimal, 'lineterminator': self.lineterminator, 'quotechar': self.quotechar, 'quoting': self.quoting, 'escapechar': self.escapechar, 'comment': self.comment, 'encoding': self.encoding, 'dialect': self.dialect, 'error_bad_lines': self.error_bad_lines, 'warn_bad_lines': self.warn_bad_lines, 'skipfooter': self.skipfooter, 'doublequote': self.doublequote, 'memory_map': self.memory_map, 'float_precision': self.float_precision, 'chunksize': self.chunksize, 'encoding_errors': self.encoding_errors, 'on_bad_lines': self.on_bad_lines, 'date_format': self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() ,A_ ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class lowerCAmelCase_ ( datasets.ArrowBasedBuilder ): '''simple docstring''' _lowerCamelCase: Any = CsvConfig def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: return datasets.DatasetInfo(features=self.config.features ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Any ) -> str: if not self.config.data_files: raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' ) A = dl_manager.download_and_extract(self.config.data_files ) if isinstance(A_ ,(str, list, tuple) ): A = data_files if isinstance(A_ ,A_ ): A = [files] A = [dl_manager.iter_files(A_ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={'files': files} )] A = [] for split_name, files in data_files.items(): if isinstance(A_ ,A_ ): A = [files] A = [dl_manager.iter_files(A_ ) for file in files] splits.append(datasets.SplitGenerator(name=A_ ,gen_kwargs={'files': files} ) ) return splits def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : pa.Table ) -> pa.Table: if self.config.features is not None: A = self.config.features.arrow_schema if all(not require_storage_cast(A_ ) for feature in self.config.features.values() ): # cheaper cast A = pa.Table.from_arrays([pa_table[field.name] for field in schema] ,schema=A_ ) else: # more expensive cast; allows str <-> int/float or str to Audio for example A = table_cast(A_ ,A_ ) return pa_table def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[str, Any] ) -> List[Any]: A = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str A = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(A_ ) else object for name, dtype, feature in zip(schema.names ,schema.types ,self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(A_ ) ): A = pd.read_csv(A_ ,iterator=A_ ,dtype=A_ ,**self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(A_ ): A = pa.Table.from_pandas(A_ ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(A_ ) except ValueError as e: logger.error(F'Failed to read file \'{file}\' with error {type(A_ )}: {e}' ) raise
91
0
from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class UpperCAmelCase_ : """simple docstring""" UpperCAmelCase__ : Optional[int] = LEDConfig UpperCAmelCase__ : Union[str, Any] = {} UpperCAmelCase__ : Dict = "gelu" def __init__( self , _a , _a=1_3 , _a=7 , _a=True , _a=False , _a=9_9 , _a=3_2 , _a=2 , _a=4 , _a=3_7 , _a=0.1 , _a=0.1 , _a=2_0 , _a=2 , _a=1 , _a=0 , _a=4 , ) -> Dict: _a : Optional[Any] = parent _a : str = batch_size _a : Dict = seq_length _a : Any = is_training _a : Any = use_labels _a : List[Any] = vocab_size _a : Tuple = hidden_size _a : Dict = num_hidden_layers _a : str = num_attention_heads _a : List[Any] = intermediate_size _a : Any = hidden_dropout_prob _a : Any = attention_probs_dropout_prob _a : Tuple = max_position_embeddings _a : List[str] = eos_token_id _a : Any = pad_token_id _a : int = bos_token_id _a : Any = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after _a : Dict = self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests _a : Tuple = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def __lowercase ( self ) -> Optional[Any]: _a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _a : Optional[int] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _a : Optional[Any] = tf.concat([input_ids, eos_tensor] , axis=1 ) _a : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _a : Dict = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , ) _a : Optional[int] = prepare_led_inputs_dict(_a , _a , _a ) _a : int = tf.concat( [tf.zeros_like(_a )[:, :-1], tf.ones_like(_a )[:, -1:]] , axis=-1 , ) _a : str = global_attention_mask return config, inputs_dict def __lowercase ( self , _a , _a ) -> Any: _a : Dict = TFLEDModel(config=_a ).get_decoder() _a : Any = inputs_dict['''input_ids'''] _a : str = input_ids[:1, :] _a : Optional[int] = inputs_dict['''attention_mask'''][:1, :] _a : Optional[Any] = 1 # first forward pass _a : int = model(_a , attention_mask=_a , use_cache=_a ) _a , _a : Dict = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _a : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size ) _a : int = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _a : List[str] = tf.concat([input_ids, next_tokens] , axis=-1 ) _a : Any = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _a : int = model(_a , attention_mask=_a )[0] _a : Optional[Any] = model(_a , attention_mask=_a , past_key_values=_a )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _a : str = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _a : List[str] = output_from_no_past[:, -3:, random_slice_idx] _a : Tuple = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(_a , _a , rtol=1e-3 ) def __UpperCAmelCase ( __a : int ,__a : str ,__a : Optional[Any] ,__a : Optional[int]=None ,__a : Optional[int]=None ,__a : Dict=None ,__a : List[Any]=None ,) -> str: """simple docstring""" if attention_mask is None: _a : Optional[int] = tf.cast(tf.math.not_equal(__a ,config.pad_token_id ) ,tf.inta ) if decoder_attention_mask is None: _a : List[Any] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.inta ), ] ,axis=-1 ,) if head_mask is None: _a : List[Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _a : Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ): """simple docstring""" UpperCAmelCase__ : Union[str, Any] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () UpperCAmelCase__ : Optional[int] = (TFLEDForConditionalGeneration,) if is_tf_available() else () UpperCAmelCase__ : List[Any] = ( { "conversational": TFLEDForConditionalGeneration, "feature-extraction": TFLEDModel, "summarization": TFLEDForConditionalGeneration, "text2text-generation": TFLEDForConditionalGeneration, "translation": TFLEDForConditionalGeneration, } if is_tf_available() else {} ) UpperCAmelCase__ : str = True UpperCAmelCase__ : Dict = False UpperCAmelCase__ : Optional[Any] = False UpperCAmelCase__ : Dict = False def __lowercase ( self ) -> Optional[Any]: _a : Dict = TFLEDModelTester(self ) _a : Any = ConfigTester(self , config_class=_a ) def __lowercase ( self ) -> Union[str, Any]: self.config_tester.run_common_tests() def __lowercase ( self ) -> int: _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_a ) def __lowercase ( self ) -> str: _a , _a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() _a : int = tf.zeros_like(inputs_dict['''attention_mask'''] ) _a : Tuple = 2 _a : Optional[Any] = tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , ) _a : Tuple = True _a : Union[str, Any] = self.model_tester.seq_length _a : Dict = self.model_tester.encoder_seq_length def check_decoder_attentions_output(_a ): _a : List[str] = outputs.decoder_attentions self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) def check_encoder_attentions_output(_a ): _a : Optional[int] = [t.numpy() for t in outputs.encoder_attentions] _a : Optional[Any] = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers ) self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) self.assertListEqual( list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , ) for model_class in self.all_model_classes: _a : List[Any] = True _a : str = False _a : Any = False _a : List[Any] = model_class(_a ) _a : Union[str, Any] = model(self._prepare_for_class(_a , _a ) ) _a : Any = len(_a ) self.assertEqual(config.output_hidden_states , _a ) check_encoder_attentions_output(_a ) if self.is_encoder_decoder: _a : str = model_class(_a ) _a : Dict = model(self._prepare_for_class(_a , _a ) ) self.assertEqual(config.output_hidden_states , _a ) check_decoder_attentions_output(_a ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] _a : Union[str, Any] = True _a : Tuple = model_class(_a ) _a : Optional[Any] = model(self._prepare_for_class(_a , _a ) ) self.assertEqual(config.output_hidden_states , _a ) check_encoder_attentions_output(_a ) # Check attention is always last and order is fine _a : Tuple = True _a : Dict = True _a : Union[str, Any] = model_class(_a ) _a : Optional[Any] = model(self._prepare_for_class(_a , _a ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_a ) ) self.assertEqual(model.config.output_hidden_states , _a ) check_encoder_attentions_output(_a ) @unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' ) def __lowercase ( self ) -> Union[str, Any]: pass def __lowercase ( self ) -> List[str]: # TODO: Head-masking not yet implement pass def __UpperCAmelCase ( __a : Tuple ) -> Any: """simple docstring""" return tf.constant(__a ,dtype=tf.intaa ) a__ = 1E-4 @slow @require_tf class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __lowercase ( self ) -> List[Any]: _a : Dict = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led # change to intended input here _a : str = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] ) _a : Tuple = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] ) _a : Any = prepare_led_inputs_dict(model.config , _a , _a ) _a : Optional[int] = model(**_a )[0] _a : str = (1, 1_0_2_4, 7_6_8) self.assertEqual(output.shape , _a ) # change to expected output here _a : str = tf.convert_to_tensor( [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , ) tf.debugging.assert_near(output[:, :3, :3] , _a , atol=1e-3 ) def __lowercase ( self ) -> str: _a : Any = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ) # change to intended input here _a : List[str] = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] ) _a : Dict = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] ) _a : Union[str, Any] = prepare_led_inputs_dict(model.config , _a , _a ) _a : Union[str, Any] = model(**_a )[0] _a : Optional[int] = (1, 1_0_2_4, model.config.vocab_size) self.assertEqual(output.shape , _a ) # change to expected output here _a : List[str] = tf.convert_to_tensor( [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , ) tf.debugging.assert_near(output[:, :3, :3] , _a , atol=1e-3 , rtol=1e-3 )
14
"""simple docstring""" from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : Any ,A_ : Callable ,A_ : Optional[Features] = None ,A_ : str = None ,A_ : bool = False ,A_ : bool = False ,A_ : Optional[dict] = None ,A_ : Optional[int] = None ,**A_ : int ,) -> str: super().__init__( features=A_ ,cache_dir=A_ ,keep_in_memory=A_ ,streaming=A_ ,num_proc=A_ ,**A_ ,) A = Generator( cache_dir=A_ ,features=A_ ,generator=A_ ,gen_kwargs=A_ ,**A_ ,) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: # Build iterable dataset if self.streaming: A = self.builder.as_streaming_dataset(split='train' ) # Build regular (map-style) dataset else: A = None A = None A = None A = None self.builder.download_and_prepare( download_config=A_ ,download_mode=A_ ,verification_mode=A_ ,base_path=A_ ,num_proc=self.num_proc ,) A = self.builder.as_dataset( split='train' ,verification_mode=A_ ,in_memory=self.keep_in_memory ) return dataset
91
0
def UpperCamelCase ( __magic_name__ : int , __magic_name__ : int ) -> int: """simple docstring""" lowercase__ = 1 # To kept the Calculated Value # Since C(n, k) = C(n, n-k) if k > (n - k): lowercase__ = n - k # Calculate C(n,k) for i in range(__magic_name__ ): result *= n - i result //= i + 1 return result def UpperCamelCase ( __magic_name__ : int ) -> int: """simple docstring""" return binomial_coefficient(2 * node_count , __magic_name__ ) // (node_count + 1) def UpperCamelCase ( __magic_name__ : int ) -> int: """simple docstring""" if n < 0: raise ValueError("""factorial() not defined for negative values""" ) lowercase__ = 1 for i in range(1 , n + 1 ): result *= i return result def UpperCamelCase ( __magic_name__ : int ) -> int: """simple docstring""" return catalan_number(__magic_name__ ) * factorial(__magic_name__ ) if __name__ == "__main__": A : Tuple = int(input('Enter the number of nodes: ').strip() or 0) if node_count <= 0: raise ValueError('We need some nodes to work with.') print( F'Given {node_count} nodes, there are {binary_tree_count(node_count)} ' F'binary trees and {catalan_number(node_count)} binary search trees.' )
15
"""simple docstring""" from maths.prime_check import is_prime def _snake_case ( snake_case__ : int ): if not isinstance(snake_case__ , snake_case__ ): A = F'Input value of [number={number}] must be an integer' raise TypeError(snake_case__ ) if is_prime(snake_case__ ) and is_prime(number + 2 ): return number + 2 else: return -1 if __name__ == "__main__": import doctest doctest.testmod()
91
0
import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __A : Any = logging.get_logger(__name__) __A : Any = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } __A : Optional[Any] = { 'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'}, 'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'}, 'tokenizer_config_file': { 'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json' }, } __A : Union[str, Any] = {'facebook/blenderbot-3B': 1_2_8} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def __a ( ): SCREAMING_SNAKE_CASE = ( list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) SCREAMING_SNAKE_CASE = bs[:] SCREAMING_SNAKE_CASE = 0 for b in range(2**8 ): if b not in bs: bs.append(A__ ) cs.append(2**8 + n ) n += 1 SCREAMING_SNAKE_CASE = [chr(A__ ) for n in cs] return dict(zip(A__ , A__ ) ) def __a ( A__ : List[Any] ): SCREAMING_SNAKE_CASE = set() SCREAMING_SNAKE_CASE = word[0] for char in word[1:]: pairs.add((prev_char, char) ) SCREAMING_SNAKE_CASE = char return pairs class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = ["input_ids", "attention_mask"] def __init__( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any="replace" , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : Optional[Any]="</s>" , __lowerCamelCase : Any="</s>" , __lowerCamelCase : Union[str, Any]="<s>" , __lowerCamelCase : List[str]="<unk>" , __lowerCamelCase : Optional[Any]="<pad>" , __lowerCamelCase : Dict="<mask>" , __lowerCamelCase : Any=False , **__lowerCamelCase : Optional[Any] , ): SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. include the space before it SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token super().__init__( errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , ) with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle: SCREAMING_SNAKE_CASE = json.load(__lowerCamelCase ) SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()} SCREAMING_SNAKE_CASE = errors # how to handle errors in decoding SCREAMING_SNAKE_CASE = bytes_to_unicode() SCREAMING_SNAKE_CASE = {v: k for k, v in self.byte_encoder.items()} with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle: SCREAMING_SNAKE_CASE = merges_handle.read().split("\n" )[1:-1] SCREAMING_SNAKE_CASE = [tuple(merge.split() ) for merge in bpe_merges] SCREAMING_SNAKE_CASE = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions SCREAMING_SNAKE_CASE = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def _snake_case ( self : str ): return len(self.encoder ) def _snake_case ( self : Union[str, Any] ): return dict(self.encoder , **self.added_tokens_encoder ) def _snake_case ( self : Dict , __lowerCamelCase : List[Any] ): if token in self.cache: return self.cache[token] SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase ) SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase ) if not pairs: return token while True: SCREAMING_SNAKE_CASE = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = bigram SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = 0 while i < len(__lowerCamelCase ): try: SCREAMING_SNAKE_CASE = word.index(__lowerCamelCase , __lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) SCREAMING_SNAKE_CASE = j if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase ) SCREAMING_SNAKE_CASE = new_word if len(__lowerCamelCase ) == 1: break else: SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase ) SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase ) SCREAMING_SNAKE_CASE = word return word def _snake_case ( self : Optional[Any] , __lowerCamelCase : List[Any] ): SCREAMING_SNAKE_CASE = [] for token in re.findall(self.pat , __lowerCamelCase ): SCREAMING_SNAKE_CASE = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) ) return bpe_tokens def _snake_case ( self : Tuple , __lowerCamelCase : Dict ): return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) ) def _snake_case ( self : Any , __lowerCamelCase : Optional[int] ): return self.decoder.get(__lowerCamelCase ) def _snake_case ( self : Optional[int] , __lowerCamelCase : List[Any] ): SCREAMING_SNAKE_CASE = "".join(__lowerCamelCase ) SCREAMING_SNAKE_CASE = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def _snake_case ( self : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): if not os.path.isdir(__lowerCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return SCREAMING_SNAKE_CASE = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) SCREAMING_SNAKE_CASE = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" ) SCREAMING_SNAKE_CASE = 0 with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" ) SCREAMING_SNAKE_CASE = token_index writer.write(" ".join(__lowerCamelCase ) + "\n" ) index += 1 return vocab_file, merge_file def _snake_case ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCamelCase )) + [1] return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1] def _snake_case ( self : Optional[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): SCREAMING_SNAKE_CASE = [self.sep_token_id] SCREAMING_SNAKE_CASE = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _snake_case ( self : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Any=False , **__lowerCamelCase : Union[str, Any] ): SCREAMING_SNAKE_CASE = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()): SCREAMING_SNAKE_CASE = " " + text return (text, kwargs) def _snake_case ( self : Any , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): return token_ids_a + [self.eos_token_id] def _snake_case ( self : int , __lowerCamelCase : "Conversation" ): SCREAMING_SNAKE_CASE = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(" " + text ) else: # Generated responses should contain them already. inputs.append(__lowerCamelCase ) SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.encode(__lowerCamelCase ) if len(__lowerCamelCase ) > self.model_max_length: SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :] logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." ) return input_ids
16
"""simple docstring""" import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Dict = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : List[str]=0 ) -> str: A = floats_tensor((1, 3, 128, 128) ,rng=random.Random(A_ ) ) A = np.random.RandomState(A_ ) A = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'strength': 0.75, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) A = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=A_ ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) # warmup pass to apply optimizations A = pipe(**self.get_dummy_inputs() ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: A = ort.SessionOptions() A = False return options def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) A = init_image.resize((768, 512) ) # using the PNDM scheduler by default A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' ,revision='onnx' ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=A_ ) A = 'A fantasy landscape, trending on artstation' A = np.random.RandomState(0 ) A = pipe( prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=A_ ,output_type='np' ,) A = output.images A = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) A = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) A = init_image.resize((768, 512) ) A = LMSDiscreteScheduler.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,subfolder='scheduler' ,revision='onnx' ) A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,scheduler=A_ ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=A_ ) A = 'A fantasy landscape, trending on artstation' A = np.random.RandomState(0 ) A = pipe( prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=A_ ,output_type='np' ,) A = output.images A = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) A = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
91
0
import time import warnings from abc import ABC from copy import deepcopy from typing import Optional import torch from ..utils import add_start_docstrings, logging UpperCAmelCase_ : List[str] = logging.get_logger(__name__) UpperCAmelCase_ : Union[str, Any] = r''' Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. kwargs (`Dict[str, Any]`, *optional*): Additional stopping criteria specific kwargs. Return: `bool`. `False` indicates we should continue, `True` indicates we should stop. ''' class lowerCamelCase_ ( _lowercase ): @add_start_docstrings(__A ) def __call__( self : str , __A : torch.LongTensor , __A : torch.FloatTensor , **__A : Optional[Any] ): raise NotImplementedError("""StoppingCriteria needs to be subclassed""" ) class lowerCamelCase_ ( _lowercase ): def __init__( self : Union[str, Any] , __A : int , __A : Optional[int] = None ): __A : Optional[int] = max_length __A : Optional[int] = max_position_embeddings @add_start_docstrings(__A ) def __call__( self : Union[str, Any] , __A : torch.LongTensor , __A : torch.FloatTensor , **__A : Optional[int] ): __A : Optional[Any] = input_ids.shape[-1] __A : Union[str, Any] = cur_len >= self.max_length if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings: logger.warning_once( """This is a friendly reminder - the current text generation call will exceed the model's predefined """ F"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """ """exceptions, performance degradation, or nothing at all.""" ) return is_done class lowerCamelCase_ ( _lowercase ): def __init__( self : List[str] , __A : int , __A : int ): warnings.warn( """The class `MaxNewTokensCriteria` is deprecated. """ F"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """ """with `max_length = start_length + max_new_tokens` instead.""" , __A , ) __A : Dict = start_length __A : Optional[int] = max_new_tokens __A : Tuple = start_length + max_new_tokens @add_start_docstrings(__A ) def __call__( self : Tuple , __A : torch.LongTensor , __A : torch.FloatTensor , **__A : str ): return input_ids.shape[-1] >= self.max_length class lowerCamelCase_ ( _lowercase ): def __init__( self : int , __A : float , __A : Optional[float] = None ): __A : Optional[int] = max_time __A : int = time.time() if initial_timestamp is None else initial_timestamp @add_start_docstrings(__A ) def __call__( self : int , __A : torch.LongTensor , __A : torch.FloatTensor , **__A : Optional[int] ): return time.time() - self.initial_timestamp > self.max_time class lowerCamelCase_ ( _lowercase ): @add_start_docstrings(__A ) def __call__( self : List[str] , __A : torch.LongTensor , __A : torch.FloatTensor , **__A : List[str] ): return any(criteria(__A , __A ) for criteria in self ) @property def lowerCAmelCase_ ( self : int ): for stopping_criterium in self: if isinstance(__A , __A ): return stopping_criterium.max_length elif isinstance(__A , __A ): return stopping_criterium.max_length return None def __SCREAMING_SNAKE_CASE ( a__ : StoppingCriteriaList ,a__ : int ) -> StoppingCriteriaList: __A : int = stopping_criteria.max_length __A : Optional[int] = deepcopy(a__ ) if stopping_max_length is not None and stopping_max_length != max_length: warnings.warn("""You set different `max_length` for stopping criteria and `max_length` parameter""" ,a__ ) elif stopping_max_length is None: new_stopping_criteria.append(MaxLengthCriteria(max_length=a__ ) ) return new_stopping_criteria
17
"""simple docstring""" from __future__ import annotations def _snake_case ( snake_case__ : tuple[int, int] , snake_case__ : int ): A , A = position A = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] A = [] for position in positions: A , A = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(snake_case__ ) return permissible_positions def _snake_case ( snake_case__ : list[list[int]] ): return not any(elem == 0 for row in board for elem in row ) def _snake_case ( snake_case__ : list[list[int]] , snake_case__ : tuple[int, int] , snake_case__ : int ): if is_complete(snake_case__ ): return True for position in get_valid_pos(snake_case__ , len(snake_case__ ) ): A , A = position if board[y][x] == 0: A = curr + 1 if open_knight_tour_helper(snake_case__ , snake_case__ , curr + 1 ): return True A = 0 return False def _snake_case ( snake_case__ : int ): A = [[0 for i in range(snake_case__ )] for j in range(snake_case__ )] for i in range(snake_case__ ): for j in range(snake_case__ ): A = 1 if open_knight_tour_helper(snake_case__ , (i, j) , 1 ): return board A = 0 A = F'Open Kight Tour cannot be performed on a board of size {n}' raise ValueError(snake_case__ ) if __name__ == "__main__": import doctest doctest.testmod()
91
0
'''simple docstring''' from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def __a(SCREAMING_SNAKE_CASE_ : Namespace ): '''simple docstring''' return ConvertCommand( args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name ) _SCREAMING_SNAKE_CASE = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n" class lowerCAmelCase_ ( __magic_name__ ): @staticmethod def _snake_case ( _lowerCAmelCase ) -> str: _lowerCAmelCase = parser.add_parser( "convert" , help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints." , ) train_parser.add_argument("--model_type" , type=_lowerCAmelCase , required=_lowerCAmelCase , help="Model's type." ) train_parser.add_argument( "--tf_checkpoint" , type=_lowerCAmelCase , required=_lowerCAmelCase , help="TensorFlow checkpoint path or folder." ) train_parser.add_argument( "--pytorch_dump_output" , type=_lowerCAmelCase , required=_lowerCAmelCase , help="Path to the PyTorch saved model output." ) train_parser.add_argument("--config" , type=_lowerCAmelCase , default="" , help="Configuration file path or folder." ) train_parser.add_argument( "--finetuning_task_name" , type=_lowerCAmelCase , default=_lowerCAmelCase , help="Optional fine-tuning task name if the TF model was a finetuned model." , ) train_parser.set_defaults(func=_lowerCAmelCase ) def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase , ) -> Dict: _lowerCAmelCase = logging.get_logger("transformers-cli/converting" ) self._logger.info(f'''Loading model {model_type}''' ) _lowerCAmelCase = model_type _lowerCAmelCase = tf_checkpoint _lowerCAmelCase = pytorch_dump_output _lowerCAmelCase = config _lowerCAmelCase = finetuning_task_name def _snake_case ( self ) -> str: if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_lowerCAmelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_lowerCAmelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_lowerCAmelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(_lowerCAmelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "gpt": from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( convert_openai_checkpoint_to_pytorch, ) convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "transfo_xl": try: from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_lowerCAmelCase ) if "ckpt" in self._tf_checkpoint.lower(): _lowerCAmelCase = self._tf_checkpoint _lowerCAmelCase = "" else: _lowerCAmelCase = self._tf_checkpoint _lowerCAmelCase = "" convert_transfo_xl_checkpoint_to_pytorch( _lowerCAmelCase , self._config , self._pytorch_dump_output , _lowerCAmelCase ) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_lowerCAmelCase ) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_lowerCAmelCase ) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name ) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) else: raise ValueError( "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" )
18
"""simple docstring""" import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Optional[int] = BlenderbotSmallTokenizer _lowerCamelCase: List[Any] = False def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: super().setUp() A = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__'] A = dict(zip(A_ ,range(len(A_ ) ) ) ) A = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', ''] A = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'} A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp: fp.write(json.dumps(A_ ) + '\n' ) with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp: fp.write('\n'.join(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,**A_ : Union[str, Any] ) -> Optional[int]: kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Tuple ) -> List[Any]: A = 'adapt act apte' A = 'adapt act apte' return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]: A = BlenderbotSmallTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map ) A = 'adapt act apte' A = ['adapt', 'act', 'ap@@', 'te'] A = tokenizer.tokenize(A_ ) self.assertListEqual(A_ ,A_ ) A = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] A = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: A = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' ) assert tok('sam' ).input_ids == [1384] A = 'I am a small frog.' A = tok([src_text] ,padding=A_ ,truncation=A_ )['input_ids'] A = tok.batch_decode(A_ ,skip_special_tokens=A_ ,clean_up_tokenization_spaces=A_ )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: A = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' ) A = 'I am a small frog .' A = '.' A = tok(A_ )['input_ids'] A = tok(A_ )['input_ids'] assert encoded[-1] == encoded_dot[0]
91
0
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import ScoreSdeVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class _UpperCAmelCase( lowerCamelCase ): lowercase__ = 42 lowercase__ = 42 def __init__( self , __a , __a) -> str: '''simple docstring''' super().__init__() self.register_modules(unet=__a , scheduler=__a) @torch.no_grad() def __call__( self , __a = 1 , __a = 20_00 , __a = None , __a = "pil" , __a = True , **__a , ) -> Union[ImagePipelineOutput, Tuple]: '''simple docstring''' _UpperCamelCase = self.unet.config.sample_size _UpperCamelCase = (batch_size, 3, img_size, img_size) _UpperCamelCase = self.unet _UpperCamelCase = randn_tensor(__a , generator=__a) * self.scheduler.init_noise_sigma _UpperCamelCase = sample.to(self.device) self.scheduler.set_timesteps(__a) self.scheduler.set_sigmas(__a) for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)): _UpperCamelCase = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device) # correction step for _ in range(self.scheduler.config.correct_steps): _UpperCamelCase = self.unet(__a , __a).sample _UpperCamelCase = self.scheduler.step_correct(__a , __a , generator=__a).prev_sample # prediction step _UpperCamelCase = model(__a , __a).sample _UpperCamelCase = self.scheduler.step_pred(__a , __a , __a , generator=__a) _UpperCamelCase , _UpperCamelCase = output.prev_sample, output.prev_sample_mean _UpperCamelCase = sample_mean.clamp(0 , 1) _UpperCamelCase = sample.cpu().permute(0 , 2 , 3 , 1).numpy() if output_type == "pil": _UpperCamelCase = self.numpy_to_pil(__a) if not return_dict: return (sample,) return ImagePipelineOutput(images=__a)
19
"""simple docstring""" from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: List[str] = ['''image_processor''', '''tokenizer'''] _lowerCamelCase: Optional[int] = '''Pix2StructImageProcessor''' _lowerCamelCase: Dict = ('''T5Tokenizer''', '''T5TokenizerFast''') def __init__( self : Optional[int] ,A_ : List[str] ,A_ : Optional[int] ) -> int: A = False super().__init__(A_ ,A_ ) def __call__( self : Any ,A_ : List[str]=None ,A_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,A_ : bool = True ,A_ : Union[bool, str, PaddingStrategy] = False ,A_ : Union[bool, str, TruncationStrategy] = None ,A_ : Optional[int] = None ,A_ : Optional[int] = 2048 ,A_ : int = 0 ,A_ : Optional[int] = None ,A_ : Optional[bool] = None ,A_ : bool = False ,A_ : bool = False ,A_ : bool = False ,A_ : bool = False ,A_ : bool = False ,A_ : bool = True ,A_ : Optional[Union[str, TensorType]] = None ,**A_ : Tuple ,) -> BatchEncoding: if images is None and text is None: raise ValueError('You have to specify either images or text.' ) # Get only text if images is None and not self.image_processor.is_vqa: A = self.tokenizer A = self.tokenizer( text=A_ ,add_special_tokens=A_ ,padding=A_ ,truncation=A_ ,max_length=A_ ,stride=A_ ,pad_to_multiple_of=A_ ,return_attention_mask=A_ ,return_overflowing_tokens=A_ ,return_special_tokens_mask=A_ ,return_offsets_mapping=A_ ,return_token_type_ids=A_ ,return_length=A_ ,verbose=A_ ,return_tensors=A_ ,**A_ ,) return text_encoding if not self.image_processor.is_vqa: # add pixel_values A = self.image_processor( A_ ,return_tensors=A_ ,max_patches=A_ ,**A_ ) else: # add pixel_values and bbox A = self.image_processor( A_ ,return_tensors=A_ ,max_patches=A_ ,header_text=A_ ,**A_ ) if text is not None and not self.image_processor.is_vqa: A = self.tokenizer( text=A_ ,add_special_tokens=A_ ,padding=A_ ,truncation=A_ ,max_length=A_ ,stride=A_ ,pad_to_multiple_of=A_ ,return_attention_mask=A_ ,return_overflowing_tokens=A_ ,return_special_tokens_mask=A_ ,return_offsets_mapping=A_ ,return_token_type_ids=A_ ,return_length=A_ ,verbose=A_ ,return_tensors=A_ ,**A_ ,) if "attention_mask" in text_encoding: A = text_encoding.pop('attention_mask' ) if "input_ids" in text_encoding: A = text_encoding.pop('input_ids' ) else: A = None if text_encoding is not None: encoding_image_processor.update(A_ ) return encoding_image_processor def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,*A_ : Optional[Any] ,**A_ : Dict ) -> Union[str, Any]: return self.tokenizer.batch_decode(*A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,*A_ : Tuple ,**A_ : List[str] ) -> Any: return self.tokenizer.decode(*A_ ,**A_ ) @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: A = self.tokenizer.model_input_names A = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
91
0
_lowerCAmelCase: Optional[Any] = { 0: '0', 1: '1', 2: '2', 3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8', 9: '9', 10: 'a', 11: 'b', 12: 'c', 13: 'd', 14: 'e', 15: 'f', } def _lowercase( __a : float ): assert type(__a ) in (int, float) and decimal == int(__a ) a__ =int(__a ) a__ ='' a__ =False if decimal < 0: a__ =True decimal *= -1 while decimal > 0: a__ , a__ =divmod(__a , 16 ) a__ =values[remainder] + hexadecimal a__ ='0x' + hexadecimal if negative: a__ ='-' + hexadecimal return hexadecimal if __name__ == "__main__": import doctest doctest.testmod()
20
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = '''▁''' _lowercase = {'''vocab_file''': '''sentencepiece.bpe.model'''} _lowercase = { '''vocab_file''': { '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''', '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''', '''xlm-roberta-large-finetuned-conll02-dutch''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll02-spanish''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll03-english''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll03-german''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model''' ), } } _lowercase = { '''xlm-roberta-base''': 5_12, '''xlm-roberta-large''': 5_12, '''xlm-roberta-large-finetuned-conll02-dutch''': 5_12, '''xlm-roberta-large-finetuned-conll02-spanish''': 5_12, '''xlm-roberta-large-finetuned-conll03-english''': 5_12, '''xlm-roberta-large-finetuned-conll03-german''': 5_12, } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = VOCAB_FILES_NAMES _lowerCamelCase: List[str] = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase: Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase: Any = ['''input_ids''', '''attention_mask'''] def __init__( self : Union[str, Any] ,A_ : str ,A_ : str="<s>" ,A_ : Any="</s>" ,A_ : Tuple="</s>" ,A_ : Any="<s>" ,A_ : Optional[Any]="<unk>" ,A_ : int="<pad>" ,A_ : str="<mask>" ,A_ : Optional[Dict[str, Any]] = None ,**A_ : Optional[int] ,) -> None: # Mask token behave like a normal word, i.e. include the space before it A = AddedToken(A_ ,lstrip=A_ ,rstrip=A_ ) if isinstance(A_ ,A_ ) else mask_token A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A_ ,eos_token=A_ ,unk_token=A_ ,sep_token=A_ ,cls_token=A_ ,pad_token=A_ ,mask_token=A_ ,sp_model_kwargs=self.sp_model_kwargs ,**A_ ,) A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(A_ ) ) A = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token A = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab A = 1 A = len(self.sp_model ) + self.fairseq_offset A = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Union[str, Any] ) -> Any: A = self.__dict__.copy() A = None A = self.sp_model.serialized_model_proto() return state def __setstate__( self : str ,A_ : str ) -> Optional[Any]: A = d # for backward compatibility if not hasattr(self ,'sp_model_kwargs' ): A = {} A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A = [self.cls_token_id] A = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[int] ,A_ : Optional[List[int]] = None ,A_ : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A_ ,token_ids_a=A_ ,already_has_special_tokens=A_ ) if token_ids_a is None: return [1] + ([0] * len(A_ )) + [1] return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1] def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]: A = [self.sep_token_id] A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: A = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> List[str]: return self.sp_model.encode(A_ ,out_type=A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[Any] ) -> Tuple: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] A = self.sp_model.PieceToId(A_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> int: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ) -> List[Any]: A = ''.join(A_ ).replace(A_ ,' ' ).strip() return out_string def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(A_ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return A = os.path.join( A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,A_ ) elif not os.path.isfile(self.vocab_file ): with open(A_ ,'wb' ) as fi: A = self.sp_model.serialized_model_proto() fi.write(A_ ) return (out_vocab_file,)
91
0
import unittest from transformers import is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow if is_flax_available(): import optax from flax.training.common_utils import onehot from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration from transformers.models.ta.modeling_flax_ta import shift_tokens_right @require_torch @require_sentencepiece @require_tokenizers @require_flax class __A ( unittest.TestCase ): @slow def A__ ( self :Tuple ): '''simple docstring''' __magic_name__ : Union[str, Any] =FlaxMTaForConditionalGeneration.from_pretrained("""google/mt5-small""" ) __magic_name__ : Optional[Any] =AutoTokenizer.from_pretrained("""google/mt5-small""" ) __magic_name__ : List[str] =tokenizer("""Hello there""" , return_tensors="""np""" ).input_ids __magic_name__ : Tuple =tokenizer("""Hi I am""" , return_tensors="""np""" ).input_ids __magic_name__ : List[str] =shift_tokens_right(__snake_case , model.config.pad_token_id , model.config.decoder_start_token_id ) __magic_name__ : Tuple =model(__snake_case , decoder_input_ids=__snake_case ).logits __magic_name__ : Union[str, Any] =optax.softmax_cross_entropy(__snake_case , onehot(__snake_case , logits.shape[-1] ) ).mean() __magic_name__ : Union[str, Any] =-(labels.shape[-1] * loss.item()) __magic_name__ : List[Any] =-84.9127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
21
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase = { '''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''], '''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''], '''processing_mctct''': ['''MCTCTProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MCTCTForCTC''', '''MCTCTModel''', '''MCTCTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
91
0
'''simple docstring''' import argparse import torch from datasets import load_dataset from donut import DonutModel from transformers import ( DonutImageProcessor, DonutProcessor, DonutSwinConfig, DonutSwinModel, MBartConfig, MBartForCausalLM, VisionEncoderDecoderModel, XLMRobertaTokenizerFast, ) def snake_case_ (UpperCamelCase : Optional[Any] ): '''simple docstring''' _a = model.config _a = DonutSwinConfig( image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , ) _a = MBartConfig( is_decoder=UpperCamelCase , is_encoder_decoder=UpperCamelCase , add_cross_attention=UpperCamelCase , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len( model.decoder.tokenizer ) , scale_embedding=UpperCamelCase , add_final_layer_norm=UpperCamelCase , ) return encoder_config, decoder_config def snake_case_ (UpperCamelCase : Tuple ): '''simple docstring''' if "encoder.model" in name: _a = name.replace('''encoder.model''' , '''encoder''' ) if "decoder.model" in name: _a = name.replace('''decoder.model''' , '''decoder''' ) if "patch_embed.proj" in name: _a = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: _a = name.replace('''patch_embed.norm''' , '''embeddings.norm''' ) if name.startswith('''encoder''' ): if "layers" in name: _a = '''encoder.''' + name if "attn.proj" in name: _a = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name and "mask" not in name: _a = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: _a = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: _a = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: _a = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: _a = name.replace('''mlp.fc2''' , '''output.dense''' ) if name == "encoder.norm.weight": _a = '''encoder.layernorm.weight''' if name == "encoder.norm.bias": _a = '''encoder.layernorm.bias''' return name def snake_case_ (UpperCamelCase : Dict , UpperCamelCase : Optional[int] ): '''simple docstring''' for key in orig_state_dict.copy().keys(): _a = orig_state_dict.pop(UpperCamelCase ) if "qkv" in key: _a = key.split('''.''' ) _a = int(key_split[3] ) _a = int(key_split[5] ) _a = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: _a = val[:dim, :] _a = val[dim : dim * 2, :] _a = val[-dim:, :] else: _a = val[:dim] _a = val[dim : dim * 2] _a = val[-dim:] elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]: # HuggingFace implementation doesn't use attn_mask buffer # and model doesn't use final LayerNorms for the encoder pass else: _a = val return orig_state_dict def snake_case_ (UpperCamelCase : List[Any] , UpperCamelCase : Tuple=None , UpperCamelCase : List[str]=False ): '''simple docstring''' _a = DonutModel.from_pretrained(UpperCamelCase ).eval() # load HuggingFace model _a , _a = get_configs(UpperCamelCase ) _a = DonutSwinModel(UpperCamelCase ) _a = MBartForCausalLM(UpperCamelCase ) _a = VisionEncoderDecoderModel(encoder=UpperCamelCase , decoder=UpperCamelCase ) model.eval() _a = original_model.state_dict() _a = convert_state_dict(UpperCamelCase , UpperCamelCase ) model.load_state_dict(UpperCamelCase ) # verify results on scanned document _a = load_dataset('''hf-internal-testing/example-documents''' ) _a = dataset['''test'''][0]['''image'''].convert('''RGB''' ) _a = XLMRobertaTokenizerFast.from_pretrained(UpperCamelCase , from_slow=UpperCamelCase ) _a = DonutImageProcessor( do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] ) _a = DonutProcessor(UpperCamelCase , UpperCamelCase ) _a = processor(UpperCamelCase , return_tensors='''pt''' ).pixel_values if model_name == "naver-clova-ix/donut-base-finetuned-docvqa": _a = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>''' _a = '''When is the coffee break?''' _a = task_prompt.replace('''{user_input}''' , UpperCamelCase ) elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip": _a = '''<s_rvlcdip>''' elif model_name in [ "naver-clova-ix/donut-base-finetuned-cord-v1", "naver-clova-ix/donut-base-finetuned-cord-v1-2560", ]: _a = '''<s_cord>''' elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2": _a = '''s_cord-v2>''' elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket": _a = '''<s_zhtrainticket>''' elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]: # use a random prompt _a = '''hello world''' else: raise ValueError('''Model name not supported''' ) _a = original_model.decoder.tokenizer(UpperCamelCase , add_special_tokens=UpperCamelCase , return_tensors='''pt''' )[ '''input_ids''' ] _a = original_model.encoder.model.patch_embed(UpperCamelCase ) _a , _a = model.encoder.embeddings(UpperCamelCase ) assert torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-3 ) # verify encoder hidden states _a = original_model.encoder(UpperCamelCase ) _a = model.encoder(UpperCamelCase ).last_hidden_state assert torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-2 ) # verify decoder hidden states _a = original_model(UpperCamelCase , UpperCamelCase , UpperCamelCase ).logits _a = model(UpperCamelCase , decoder_input_ids=UpperCamelCase ).logits assert torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-3 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(f'Saving model and processor to {pytorch_dump_folder_path}' ) model.save_pretrained(UpperCamelCase ) processor.save_pretrained(UpperCamelCase ) if push_to_hub: model.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' ) processor.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' ) if __name__ == "__main__": _snake_case : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='naver-clova-ix/donut-base-finetuned-docvqa', required=False, type=str, help='Name of the original model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, required=False, type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model and processor to the 🤗 hub.', ) _snake_case : Union[str, Any] = parser.parse_args() convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
22
"""simple docstring""" from torch import nn def _snake_case ( snake_case__ : Union[str, Any] ): if act_fn in ["swish", "silu"]: return nn.SiLU() elif act_fn == "mish": return nn.Mish() elif act_fn == "gelu": return nn.GELU() else: raise ValueError(F'Unsupported activation function: {act_fn}' )
91
0
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class _a : """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=2 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=10 , _UpperCAmelCase=3 , _UpperCAmelCase=32 * 8 , _UpperCAmelCase=32 * 8 , _UpperCAmelCase=4 , _UpperCAmelCase=64 , ) -> List[Any]: UpperCamelCase_ = parent UpperCamelCase_ = batch_size UpperCamelCase_ = is_training UpperCamelCase_ = use_auxiliary_loss UpperCamelCase_ = num_queries UpperCamelCase_ = num_channels UpperCamelCase_ = min_size UpperCamelCase_ = max_size UpperCamelCase_ = num_labels UpperCamelCase_ = hidden_dim UpperCamelCase_ = hidden_dim def _UpperCAmelCase ( self ) -> List[str]: UpperCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( _UpperCAmelCase ) UpperCamelCase_ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_UpperCAmelCase ) UpperCamelCase_ = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_UpperCAmelCase ) > 0.5 ).float() UpperCamelCase_ = (torch.rand((self.batch_size, self.num_labels) , device=_UpperCAmelCase ) > 0.5).long() UpperCamelCase_ = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def _UpperCAmelCase ( self ) -> Optional[int]: UpperCamelCase_ = MaskaFormerConfig( hidden_size=self.hidden_dim , ) UpperCamelCase_ = self.num_queries UpperCamelCase_ = self.num_labels UpperCamelCase_ = [1, 1, 1, 1] UpperCamelCase_ = self.num_channels UpperCamelCase_ = 64 UpperCamelCase_ = 128 UpperCamelCase_ = self.hidden_dim UpperCamelCase_ = self.hidden_dim UpperCamelCase_ = self.hidden_dim return config def _UpperCAmelCase ( self ) -> Dict: UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.prepare_config_and_inputs() UpperCamelCase_ = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask} return config, inputs_dict def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]: UpperCamelCase_ = output.encoder_hidden_states UpperCamelCase_ = output.pixel_decoder_hidden_states UpperCamelCase_ = output.transformer_decoder_hidden_states self.parent.assertTrue(len(_UpperCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(_UpperCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(_UpperCAmelCase ) , config.decoder_layers ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ) -> Any: with torch.no_grad(): UpperCamelCase_ = MaskaFormerModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() UpperCamelCase_ = model(pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase ) UpperCamelCase_ = model(_UpperCAmelCase , output_hidden_states=_UpperCAmelCase ) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(_UpperCAmelCase , _UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]: UpperCamelCase_ = MaskaFormerForUniversalSegmentation(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() def comm_check_on_output(_UpperCAmelCase ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): UpperCamelCase_ = model(pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase ) UpperCamelCase_ = model(_UpperCAmelCase ) comm_check_on_output(_UpperCAmelCase ) UpperCamelCase_ = model( pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase ) comm_check_on_output(_UpperCAmelCase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class _a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): """simple docstring""" A_ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () A_ = {"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {} A_ = False A_ = False A_ = False A_ = False def _UpperCAmelCase ( self ) -> Optional[Any]: UpperCamelCase_ = MaskaFormerModelTester(self ) UpperCamelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(_UpperCAmelCase , **_UpperCAmelCase , output_hidden_states=_UpperCAmelCase ) def _UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_UpperCAmelCase ) @unittest.skip(reason='Mask2Former does not use inputs_embeds' ) def _UpperCAmelCase ( self ) -> Any: pass @unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' ) def _UpperCAmelCase ( self ) -> Optional[int]: pass @unittest.skip(reason='Mask2Former is not a generative model' ) def _UpperCAmelCase ( self ) -> Any: pass @unittest.skip(reason='Mask2Former does not use token embeddings' ) def _UpperCAmelCase ( self ) -> Optional[Any]: pass @require_torch_multi_gpu @unittest.skip( reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' ) def _UpperCAmelCase ( self ) -> int: pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def _UpperCAmelCase ( self ) -> str: pass def _UpperCAmelCase ( self ) -> Optional[int]: UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase_ = model_class(_UpperCAmelCase ) UpperCamelCase_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase_ = [*signature.parameters.keys()] UpperCamelCase_ = ['pixel_values'] self.assertListEqual(arg_names[:1] , _UpperCAmelCase ) @slow def _UpperCAmelCase ( self ) -> Tuple: for model_name in ["facebook/mask2former-swin-small-coco-instance"]: UpperCamelCase_ = MaskaFormerModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Dict: UpperCamelCase_ = (self.model_tester.min_size,) * 2 UpperCamelCase_ = { 'pixel_values': torch.randn((2, 3, *size) , device=_UpperCAmelCase ), 'mask_labels': torch.randn((2, 10, *size) , device=_UpperCAmelCase ), 'class_labels': torch.zeros(2 , 10 , device=_UpperCAmelCase ).long(), } UpperCamelCase_ = self.model_tester.get_config() UpperCamelCase_ = MaskaFormerForUniversalSegmentation(_UpperCAmelCase ).to(_UpperCAmelCase ) UpperCamelCase_ = model(**_UpperCAmelCase ) self.assertTrue(outputs.loss is not None ) def _UpperCAmelCase ( self ) -> str: UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(_UpperCAmelCase , **_UpperCAmelCase , output_hidden_states=_UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Optional[int]: UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase_ = model_class(_UpperCAmelCase ).to(_UpperCAmelCase ) UpperCamelCase_ = model(**_UpperCAmelCase , output_attentions=_UpperCAmelCase ) self.assertTrue(outputs.attentions is not None ) def _UpperCAmelCase ( self ) -> List[Any]: if not self.model_tester.is_training: return UpperCamelCase_ = self.all_model_classes[1] UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs() UpperCamelCase_ = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.train() UpperCamelCase_ = model(_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase ).loss loss.backward() def _UpperCAmelCase ( self ) -> int: UpperCamelCase_ = self.all_model_classes[1] UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs() UpperCamelCase_ = True UpperCamelCase_ = True UpperCamelCase_ = model_class(_UpperCAmelCase ).to(_UpperCAmelCase ) model.train() UpperCamelCase_ = model(_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase ) UpperCamelCase_ = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() UpperCamelCase_ = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() UpperCamelCase_ = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() UpperCamelCase_ = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=_UpperCAmelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) snake_case__ : List[Any] = 1E-4 def _snake_case (): UpperCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') return image @require_vision @slow class _a ( unittest.TestCase ): """simple docstring""" @cached_property def _UpperCAmelCase ( self ) -> Optional[int]: return "facebook/mask2former-swin-small-coco-instance" @cached_property def _UpperCAmelCase ( self ) -> List[str]: return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None def _UpperCAmelCase ( self ) -> str: UpperCamelCase_ = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase ) UpperCamelCase_ = self.default_image_processor UpperCamelCase_ = prepare_img() UpperCamelCase_ = image_processor(_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase ) UpperCamelCase_ = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(_UpperCAmelCase , (1, 3, 384, 384) ) with torch.no_grad(): UpperCamelCase_ = model(**_UpperCAmelCase ) UpperCamelCase_ = torch.tensor( [[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(_UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) ) UpperCamelCase_ = torch.tensor( [[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(_UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) ) UpperCamelCase_ = torch.tensor( [[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(_UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) ) def _UpperCAmelCase ( self ) -> Optional[Any]: UpperCamelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase ).eval() UpperCamelCase_ = self.default_image_processor UpperCamelCase_ = prepare_img() UpperCamelCase_ = image_processor(_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase ) UpperCamelCase_ = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(_UpperCAmelCase , (1, 3, 384, 384) ) with torch.no_grad(): UpperCamelCase_ = model(**_UpperCAmelCase ) # masks_queries_logits UpperCamelCase_ = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ) UpperCamelCase_ = [ [-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1], [-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1], [-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5], ] UpperCamelCase_ = torch.tensor(_UpperCAmelCase ).to(_UpperCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) ) # class_queries_logits UpperCamelCase_ = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) ) UpperCamelCase_ = torch.tensor( [ [1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2], [0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3], [0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5], ] ).to(_UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) ) def _UpperCAmelCase ( self ) -> Dict: UpperCamelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase ).eval() UpperCamelCase_ = self.default_image_processor UpperCamelCase_ = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , ) UpperCamelCase_ = inputs['pixel_values'].to(_UpperCAmelCase ) UpperCamelCase_ = [el.to(_UpperCAmelCase ) for el in inputs['mask_labels']] UpperCamelCase_ = [el.to(_UpperCAmelCase ) for el in inputs['class_labels']] with torch.no_grad(): UpperCamelCase_ = model(**_UpperCAmelCase ) self.assertTrue(outputs.loss is not None )
23
"""simple docstring""" import copy import re class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: str = '''hp''' _lowerCamelCase: List[Any] = {} _lowerCamelCase: List[Any] = None @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,A_ : List[str] ,A_ : Optional[Any] ) -> Tuple: A = prefix A = defaults cls.build_naming_info() @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : Any ,A_ : List[Any] ) -> int: if len(A_ ) == 0: return "" A = None if any(char.isdigit() for char in word ): raise Exception(F'Parameters should not contain numbers: \'{word}\' contains a number' ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 ,len(A_ ) + 1 ): A = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: A = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(A_ : Optional[Any] ): A = '' while integer != 0: A = chr(ord('A' ) + integer % 10 ) + s integer //= 10 return s A = 0 while True: A = word + '#' + int_to_alphabetic(A_ ) if sword in info["reverse_short_word"]: continue else: A = sword break A = short_word A = word return short_word @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : List[Any] ,A_ : Union[str, Any] ) -> Union[str, Any]: A = param_name.split('_' ) A = [TrialShortNamer.shortname_for_word(A_ ,A_ ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name A = ['', '_'] for separator in separators: A = separator.join(A_ ) if shortname not in info["reverse_short_param"]: A = shortname A = param_name return shortname return param_name @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : List[Any] ,A_ : Any ) -> Tuple: A = TrialShortNamer.shortname_for_key(A_ ,A_ ) A = short_name A = param_name @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict ) -> List[Any]: if cls.NAMING_INFO is not None: return A = { 'short_word': {}, 'reverse_short_word': {}, 'short_param': {}, 'reverse_short_param': {}, } A = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(A_ ,A_ ) A = info @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,A_ : Union[str, Any] ) -> Union[str, Any]: cls.build_naming_info() assert cls.PREFIX is not None A = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(F'You should provide a default value for the param name {k} with value {v}' ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue A = cls.NAMING_INFO['short_param'][k] if isinstance(A_ ,A_ ): A = 1 if v else 0 A = '' if isinstance(A_ ,(int, float) ) else '-' A = F'{key}{sep}{v}' name.append(A_ ) return "_".join(A_ ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[str] ,A_ : Any ) -> int: A = repr[len(cls.PREFIX ) + 1 :] if repr == "": A = [] else: A = repr.split('_' ) A = {} for value in values: if "-" in value: A , A = value.split('-' ) else: A = re.sub('[0-9.]' ,'' ,A_ ) A = float(re.sub('[^0-9.]' ,'' ,A_ ) ) A = cls.NAMING_INFO['reverse_short_param'][p_k] A = p_v for k in cls.DEFAULTS: if k not in parameters: A = cls.DEFAULTS[k] return parameters
91
0
'''simple docstring''' def _UpperCamelCase (_lowerCamelCase : list[int] , _lowerCamelCase : list[int] )-> None: '''simple docstring''' __snake_case = len(_lowerCamelCase ) print('''The following activities are selected:''' ) # The first activity is always selected __snake_case = 0 print(_lowerCamelCase , end=''',''' ) # Consider rest of the activities for j in range(_lowerCamelCase ): # If this activity has start time greater than # or equal to the finish time of previously # selected activity, then select it if start[j] >= finish[i]: print(_lowerCamelCase , end=''',''' ) __snake_case = j if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase_ : int = [1, 3, 0, 5, 8, 5] UpperCAmelCase_ : Union[str, Any] = [2, 4, 6, 7, 9, 9] print_max_activities(start, finish)
24
"""simple docstring""" import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def _snake_case ( ): with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(snake_case__ ): requests.request('GET' , 'https://huggingface.co' ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request('GET' , 'https://huggingface.co' , timeout=1.0 ) @pytest.mark.integration def _snake_case ( ): with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request('GET' , 'https://huggingface.co' ) def _snake_case ( ): with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(snake_case__ ): http_head('https://huggingface.co' )
91
0
import csv from collections import defaultdict from dataclasses import dataclass, field from typing import List, Optional import matplotlib.pyplot as plt import numpy as np from matplotlib.ticker import ScalarFormatter from transformers import HfArgumentParser def lowerCamelCase__ ( _a=None , _a=None): return field(default_factory=lambda: default , metadata=_a) @dataclass class _UpperCamelCase : '''simple docstring''' lowerCamelCase__ =field( metadata={'help': 'The csv file to plot.'} , ) lowerCamelCase__ =field( default=__A , metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'} , ) lowerCamelCase__ =field( default=__A , metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'} , ) lowerCamelCase__ =field( default=__A , metadata={'help': 'Disable logarithmic scale when plotting'} , ) lowerCamelCase__ =field( default=__A , metadata={ 'help': 'Whether the csv file has training results or inference results. Defaults to inference results.' } , ) lowerCamelCase__ =field( default=__A , metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'} , ) lowerCamelCase__ =list_field( default=__A , metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} ) def lowerCamelCase__ ( _a): try: int(_a) return True except ValueError: return False def lowerCamelCase__ ( _a): try: float(_a) return True except ValueError: return False class _UpperCamelCase : '''simple docstring''' def __init__( self : Tuple , a : Optional[int] ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = args SCREAMING_SNAKE_CASE : int = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} ) with open(self.args.csv_file , newline="" ) as csv_file: SCREAMING_SNAKE_CASE : Optional[Any] = csv.DictReader(a ) for row in reader: SCREAMING_SNAKE_CASE : Optional[Any] = row["model"] self.result_dict[model_name]["bsz"].append(int(row["batch_size"] ) ) self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"] ) ) if can_convert_to_int(row["result"] ): # value is not None SCREAMING_SNAKE_CASE : Tuple = int(row["result"] ) elif can_convert_to_float(row["result"] ): # value is not None SCREAMING_SNAKE_CASE : str = float(row["result"] ) def __UpperCamelCase ( self : Tuple ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[str] = plt.subplots() SCREAMING_SNAKE_CASE : Tuple = "Time usage" if self.args.is_time else "Memory usage" SCREAMING_SNAKE_CASE : Union[str, Any] = title_str + " for training" if self.args.is_train else title_str + " for inference" if not self.args.no_log_scale: # set logarithm scales ax.set_xscale("log" ) ax.set_yscale("log" ) for axis in [ax.xaxis, ax.yaxis]: axis.set_major_formatter(ScalarFormatter() ) for model_name_idx, model_name in enumerate(self.result_dict.keys() ): SCREAMING_SNAKE_CASE : str = sorted(set(self.result_dict[model_name]["bsz"] ) ) SCREAMING_SNAKE_CASE : Any = sorted(set(self.result_dict[model_name]["seq_len"] ) ) SCREAMING_SNAKE_CASE : Any = self.result_dict[model_name]["result"] ((SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE)) : str = ( (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes) ) SCREAMING_SNAKE_CASE : Optional[Any] = ( model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx] ) for inner_loop_value in inner_loop_array: if self.args.plot_along_batch: SCREAMING_SNAKE_CASE : List[str] = np.asarray( [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=a , ) else: SCREAMING_SNAKE_CASE : Tuple = np.asarray( [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , ) ((SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE)) : Union[str, Any] = ( ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz") ) SCREAMING_SNAKE_CASE : str = np.asarray(a , a )[: len(a )] plt.scatter( a , a , label=F"{label_model_name} - {inner_loop_label}: {inner_loop_value}" ) plt.plot(a , a , "--" ) title_str += F" {label_model_name} vs." SCREAMING_SNAKE_CASE : List[Any] = title_str[:-4] SCREAMING_SNAKE_CASE : Optional[int] = "Time in s" if self.args.is_time else "Memory in MB" # plot plt.title(a ) plt.xlabel(a ) plt.ylabel(a ) plt.legend() if self.args.figure_png_file is not None: plt.savefig(self.args.figure_png_file ) else: plt.show() def lowerCamelCase__ ( ): SCREAMING_SNAKE_CASE : Any = HfArgumentParser(_a) SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args_into_dataclasses()[0] SCREAMING_SNAKE_CASE : Any = Plot(args=_a) plot.plot() if __name__ == "__main__": main()
25
"""simple docstring""" import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: List[str] = BioGptTokenizer _lowerCamelCase: Tuple = False def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>', ] A = dict(zip(A_ ,range(len(A_ ) ) ) ) A = ['l o 123', 'lo w 1456', 'e r</w> 1789', ''] A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ) as fp: fp.write(json.dumps(A_ ) ) with open(self.merges_file ,'w' ) as fp: fp.write('\n'.join(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Tuple ) -> int: A = 'lower newer' A = 'lower newer' return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = BioGptTokenizer(self.vocab_file ,self.merges_file ) A = 'lower' A = ['low', 'er</w>'] A = tokenizer.tokenize(A_ ) self.assertListEqual(A_ ,A_ ) A = tokens + ['<unk>'] A = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) ,A_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: A = BioGptTokenizer.from_pretrained('microsoft/biogpt' ) A = tokenizer.encode('sequence builders' ,add_special_tokens=A_ ) A = tokenizer.encode('multi-sequence build' ,add_special_tokens=A_ ) A = tokenizer.build_inputs_with_special_tokens(A_ ) A = tokenizer.build_inputs_with_special_tokens(A_ ,A_ ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
91
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { "edbeeching/decision-transformer-gym-hopper-medium": ( "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json" ), # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer } class _A ( __lowercase ): lowercase__: Any = '''decision_transformer''' lowercase__: Union[str, Any] = ['''past_key_values'''] lowercase__: List[str] = { '''max_position_embeddings''': '''n_positions''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Tuple , __magic_name__ : Optional[Any]=17 , __magic_name__ : Any=4 , __magic_name__ : Union[str, Any]=1_28 , __magic_name__ : Optional[Any]=40_96 , __magic_name__ : List[Any]=True , __magic_name__ : List[Any]=1 , __magic_name__ : Optional[Any]=10_24 , __magic_name__ : Union[str, Any]=3 , __magic_name__ : Tuple=1 , __magic_name__ : Dict=None , __magic_name__ : Tuple="relu" , __magic_name__ : List[str]=0.1 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : Any=0.1 , __magic_name__ : str=1E-5 , __magic_name__ : Dict=0.02 , __magic_name__ : List[str]=True , __magic_name__ : List[str]=True , __magic_name__ : Optional[int]=5_02_56 , __magic_name__ : Optional[Any]=5_02_56 , __magic_name__ : int=False , __magic_name__ : Union[str, Any]=False , **__magic_name__ : int , ) -> int: """simple docstring""" __snake_case : str = state_dim __snake_case : Optional[Any] = act_dim __snake_case : Dict = hidden_size __snake_case : int = max_ep_len __snake_case : Any = action_tanh __snake_case : Union[str, Any] = vocab_size __snake_case : Optional[Any] = n_positions __snake_case : Optional[int] = n_layer __snake_case : List[Any] = n_head __snake_case : Tuple = n_inner __snake_case : int = activation_function __snake_case : List[Any] = resid_pdrop __snake_case : Union[str, Any] = embd_pdrop __snake_case : int = attn_pdrop __snake_case : Optional[Any] = layer_norm_epsilon __snake_case : Optional[Any] = initializer_range __snake_case : str = scale_attn_weights __snake_case : str = use_cache __snake_case : List[Any] = scale_attn_by_inverse_layer_idx __snake_case : Dict = reorder_and_upcast_attn __snake_case : Any = bos_token_id __snake_case : Dict = eos_token_id super().__init__(bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
26
"""simple docstring""" # HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. # # By default it'll use the lowest result as the base line to use as 100% and then compare the rest to # it as can be seen from the table above, but you can also specify which combination is the one to use as # the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0' # # --target-metric-key is there to tell the program which metrics to compare - the different metric keys are # inside output_dir/all_results.json. e.g., to measure eval performance instead of train use: # --target-metric-key eval_samples_per_second # but of course you will need to adjust the --base-cmd value in the example to perform evaluation as # well (as currently it doesn't) # import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers _lowercase = float('''nan''') class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[str] ,A_ : Tuple ) -> Any: A = sys.stdout A = open(A_ ,'a' ) def __getattr__( self : int ,A_ : Optional[Any] ) -> Tuple: return getattr(self.stdout ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ) -> str: self.stdout.write(A_ ) # strip tqdm codes self.file.write(re.sub(R'^.*\r' ,'' ,A_ ,0 ,re.M ) ) def _snake_case ( snake_case__ : Optional[Any]=80 , snake_case__ : List[str]=False ): A = [] # deal with critical env vars A = ['CUDA_VISIBLE_DEVICES'] for key in env_keys: A = os.environ.get(snake_case__ , snake_case__ ) if val is not None: cmd.append(F'{key}={val}' ) # python executable (not always needed if the script is executable) A = sys.executable if full_python_path else sys.executable.split('/' )[-1] cmd.append(snake_case__ ) # now the normal args cmd += list(map(shlex.quote , sys.argv ) ) # split up into up to MAX_WIDTH lines with shell multi-line escapes A = [] A = '' while len(snake_case__ ) > 0: current_line += F'{cmd.pop(0 )} ' if len(snake_case__ ) == 0 or len(snake_case__ ) + len(cmd[0] ) + 1 > max_width - 1: lines.append(snake_case__ ) A = '' return "\\\n".join(snake_case__ ) def _snake_case ( snake_case__ : str , snake_case__ : str ): # unwrap multi-line input A = re.sub(r'[\\\n]+' , ' ' , args.base_cmd ) # remove --output_dir if any and set our own A = re.sub('--output_dir\s+[^\s]+' , '' , args.base_cmd ) args.base_cmd += F' --output_dir {output_dir}' # ensure we have --overwrite_output_dir A = re.sub('--overwrite_output_dir\s+' , '' , args.base_cmd ) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd ) def _snake_case ( snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : Any , snake_case__ : List[Any] ): # Enable to debug everything but the run itself, to do it fast and see the progress. # This is useful for debugging the output formatting quickly - we can remove it later once # everybody is happy with the output if 0: import random from time import sleep sleep(0 ) return dict( {k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222] )} , ) A = subprocess.run(snake_case__ , capture_output=snake_case__ , text=snake_case__ ) if verbose: print('STDOUT' , result.stdout ) print('STDERR' , result.stderr ) # save the streams A = variation.replace(' ' , '-' ) with open(Path(snake_case__ ) / F'log.{prefix}.stdout.txt' , 'w' ) as f: f.write(result.stdout ) with open(Path(snake_case__ ) / F'log.{prefix}.stderr.txt' , 'w' ) as f: f.write(result.stderr ) if result.returncode != 0: if verbose: print('failed' ) return {target_metric_key: nan} with io.open(F'{output_dir}/all_results.json' , 'r' , encoding='utf-8' ) as f: A = json.load(snake_case__ ) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def _snake_case ( snake_case__ : str , snake_case__ : str , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : Optional[Any] , ): A = [] A = [] A = F'{id}: {variation:<{longest_variation_len}}' A = F'{preamble}: ' A = set(report_metric_keys + [target_metric_key] ) for i in tqdm(range(snake_case__ ) , desc=snake_case__ , leave=snake_case__ ): A = process_run_single( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) A = single_run_metrics[target_metric_key] if not math.isnan(snake_case__ ): metrics.append(snake_case__ ) results.append(snake_case__ ) outcome += "✓" else: outcome += "✘" A = F'\33[2K\r{outcome}' if len(snake_case__ ) > 0: A = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()} A = round(mean_metrics[target_metric_key] , 2 ) A = F'{outcome} {mean_target}' if len(snake_case__ ) > 1: results_str += F' {tuple(round(snake_case__ , 2 ) for x in results )}' print(snake_case__ ) A = variation return mean_metrics else: print(snake_case__ ) return {variation_key: variation, target_metric_key: nan} def _snake_case ( ): A = torch.cuda.get_device_properties(torch.device('cuda' ) ) return F'\nDatetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n' def _snake_case ( snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Union[str, Any] ): A = pd.DataFrame(snake_case__ ) A = 'variation' A = 'diff_%' A = nan if base_variation is not None and len(df[df[variation_key] == base_variation] ): # this may still return nan A = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(snake_case__ ): # as a fallback, use the minimal value as the sentinel A = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(snake_case__ ): A = df.apply( lambda snake_case__ : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value ) if not math.isnan(r[target_metric_key] ) else 0 , axis='columns' , ) # re-order columns A = [variation_key, target_metric_key, diff_key, *report_metric_keys] A = df.reindex(snake_case__ , axis='columns' ) # reorder cols # capitalize A = df.rename(str.capitalize , axis='columns' ) # make the cols as narrow as possible A = df.rename(lambda snake_case__ : c.replace('_' , '<br>' ) , axis='columns' ) A = df.rename(lambda snake_case__ : c.replace('_' , '\n' ) , axis='columns' ) A = ['', 'Copy between the cut-here-lines and paste as is to github or a forum'] report += ["----------8<-----------------8<--------"] report += ["*** Results:", df_github.to_markdown(index=snake_case__ , floatfmt='.2f' )] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=snake_case__ , floatfmt='.2f' )] print('\n\n'.join(snake_case__ ) ) def _snake_case ( ): A = argparse.ArgumentParser() parser.add_argument( '--base-cmd' , default=snake_case__ , type=snake_case__ , required=snake_case__ , help='Base cmd' , ) parser.add_argument( '--variations' , default=snake_case__ , type=snake_case__ , nargs='+' , required=snake_case__ , help='Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'' , ) parser.add_argument( '--base-variation' , default=snake_case__ , type=snake_case__ , help='Baseline variation to compare to. if None the minimal target value will be used to compare against' , ) parser.add_argument( '--target-metric-key' , default=snake_case__ , type=snake_case__ , required=snake_case__ , help='Target metric key in output_dir/all_results.json, e.g., train_samples_per_second' , ) parser.add_argument( '--report-metric-keys' , default='' , type=snake_case__ , help='Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples' , ) parser.add_argument( '--repeat-times' , default=1 , type=snake_case__ , help='How many times to re-run each variation - an average will be reported' , ) parser.add_argument( '--output_dir' , default='output_benchmark' , type=snake_case__ , help='The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked' , ) parser.add_argument( '--verbose' , default=snake_case__ , action='store_true' , help='Whether to show the outputs of each run or just the benchmark progress' , ) A = parser.parse_args() A = args.output_dir Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) A = get_base_command(snake_case__ , snake_case__ ) # split each dimension into its --foo variations A = [list(map(str.strip , re.split(r'\|' , snake_case__ ) ) ) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty A = list(map(str.strip , map(' '.join , itertools.product(*snake_case__ ) ) ) ) A = max(len(snake_case__ ) for x in variations ) # split wanted keys A = args.report_metric_keys.split() # capture prints into a log file for convenience A = F'benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt' print(F'\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt' ) print(F'and this script\'s output is also piped into {report_fn}' ) A = Tee(snake_case__ ) print(F'\n*** Running {len(snake_case__ )} benchmarks:' ) print(F'Base command: {" ".join(snake_case__ )}' ) A = 'variation' A = [] for id, variation in enumerate(tqdm(snake_case__ , desc='Total completion: ' , leave=snake_case__ ) ): A = base_cmd + variation.split() results.append( process_run( id + 1 , snake_case__ , snake_case__ , snake_case__ , snake_case__ , args.target_metric_key , snake_case__ , args.repeat_times , snake_case__ , args.verbose , ) ) process_results(snake_case__ , args.target_metric_key , snake_case__ , args.base_variation , snake_case__ ) if __name__ == "__main__": main()
91
0
import unittest from transformers import SqueezeBertConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) class lowerCamelCase( __snake_case ): '''simple docstring''' def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=False , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=64 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , snake_case_=2 , snake_case_=2 , snake_case_=2 , snake_case_=2 , snake_case_=4 , snake_case_=1 , ): _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_input_mask _A = use_token_type_ids _A = use_labels _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = type_sequence_label_size _A = initializer_range _A = num_labels _A = num_choices _A = scope _A = q_groups _A = k_groups _A = v_groups _A = post_attention_groups _A = intermediate_groups _A = output_groups def lowerCAmelCase__ ( self ): _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_input_mask: _A = random_attention_mask([self.batch_size, self.seq_length] ) _A = None _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A = ids_tensor([self.batch_size] , self.num_choices ) _A = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase__ ( self ): return SqueezeBertConfig( embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , ) def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _A = SqueezeBertModel(config=snake_case_ ) model.to(snake_case_ ) model.eval() _A = model(snake_case_ , snake_case_ ) _A = model(snake_case_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _A = SqueezeBertForMaskedLM(config=snake_case_ ) model.to(snake_case_ ) model.eval() _A = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _A = SqueezeBertForQuestionAnswering(config=snake_case_ ) model.to(snake_case_ ) model.eval() _A = model( snake_case_ , attention_mask=snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _A = self.num_labels _A = SqueezeBertForSequenceClassification(snake_case_ ) model.to(snake_case_ ) model.eval() _A = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _A = self.num_labels _A = SqueezeBertForTokenClassification(config=snake_case_ ) model.to(snake_case_ ) model.eval() _A = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _A = self.num_choices _A = SqueezeBertForMultipleChoice(config=snake_case_ ) model.to(snake_case_ ) model.eval() _A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A = model( snake_case_ , attention_mask=snake_case_ , labels=snake_case_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase__ ( self ): _A = self.prepare_config_and_inputs() ((_A), (_A), (_A), (_A), (_A), (_A)) = config_and_inputs _A = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowerCamelCase( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' __magic_name__ = ( ( SqueezeBertModel, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, ) if is_torch_available() else None ) __magic_name__ = ( { 'feature-extraction': SqueezeBertModel, 'fill-mask': SqueezeBertForMaskedLM, 'question-answering': SqueezeBertForQuestionAnswering, 'text-classification': SqueezeBertForSequenceClassification, 'token-classification': SqueezeBertForTokenClassification, 'zero-shot': SqueezeBertForSequenceClassification, } if is_torch_available() else {} ) __magic_name__ = False __magic_name__ = True __magic_name__ = False def lowerCAmelCase__ ( self ): _A = SqueezeBertModelTester(self ) _A = ConfigTester(self , config_class=snake_case_ , dim=37 ) def lowerCAmelCase__ ( self ): self.config_tester.run_common_tests() def lowerCAmelCase__ ( self ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_model(*snake_case_ ) def lowerCAmelCase__ ( self ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_masked_lm(*snake_case_ ) def lowerCAmelCase__ ( self ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_question_answering(*snake_case_ ) def lowerCAmelCase__ ( self ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_sequence_classification(*snake_case_ ) def lowerCAmelCase__ ( self ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_token_classification(*snake_case_ ) def lowerCAmelCase__ ( self ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_multiple_choice(*snake_case_ ) @slow def lowerCAmelCase__ ( self ): for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = SqueezeBertModel.from_pretrained(snake_case_ ) self.assertIsNotNone(snake_case_ ) @require_sentencepiece @require_tokenizers @require_torch class lowerCamelCase( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase__ ( self ): _A = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' ) _A = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]] ) _A = model(snake_case_ )[0] _A = torch.Size((1, 3) ) self.assertEqual(output.shape , snake_case_ ) _A = torch.tensor([[0.6401, -0.0349, -0.6041]] ) self.assertTrue(torch.allclose(snake_case_ , snake_case_ , atol=1E-4 ) )
27
"""simple docstring""" import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version('''>=''', FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType _lowercase = get_logger(__name__) def _snake_case ( snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : int , snake_case__ : str=0 ): os.makedirs(snake_case__ , exist_ok=snake_case__ ) with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): A = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: A = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin' A = os.path.join(snake_case__ , snake_case__ ) if accelerator.process_index == 0: logger.info(F'Saving model to {output_model_file}' ) torch.save(snake_case__ , snake_case__ ) logger.info(F'Model saved to {output_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: A = ( F'{MODEL_NAME}_rank{accelerator.process_index}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Saving model to {output_model_file}' ) torch.save(snake_case__ , snake_case__ ) logger.info(F'Model saved to {output_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: A = os.path.join(snake_case__ , F'{MODEL_NAME}_{model_index}' ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) logger.info(F'Saving model to {ckpt_dir}' ) A = {'model': state_dict} dist_cp.save_state_dict( state_dict=snake_case__ , storage_writer=dist_cp.FileSystemWriter(snake_case__ ) , planner=DefaultSavePlanner() , ) logger.info(F'Model saved to {ckpt_dir}' ) def _snake_case ( snake_case__ : int , snake_case__ : List[str] , snake_case__ : str , snake_case__ : str , snake_case__ : Any=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(snake_case__ ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( 'Set the `sync_module_states` flag to `True` so that model states are synced across processes when ' 'initializing FSDP object' ) return A = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin' A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Loading model from {input_model_file}' ) A = torch.load(snake_case__ ) logger.info(F'Model loaded from {input_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: A = ( F'{MODEL_NAME}_rank{accelerator.process_index}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Loading model from {input_model_file}' ) A = torch.load(snake_case__ ) logger.info(F'Model loaded from {input_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: A = ( os.path.join(snake_case__ , F'{MODEL_NAME}_{model_index}' ) if F'{MODEL_NAME}' not in input_dir else input_dir ) logger.info(F'Loading model from {ckpt_dir}' ) A = {'model': model.state_dict()} dist_cp.load_state_dict( state_dict=snake_case__ , storage_reader=dist_cp.FileSystemReader(snake_case__ ) , planner=DefaultLoadPlanner() , ) A = state_dict['model'] logger.info(F'Model loaded from {ckpt_dir}' ) model.load_state_dict(snake_case__ ) def _snake_case ( snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : Any=0 ): os.makedirs(snake_case__ , exist_ok=snake_case__ ) with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): A = FSDP.optim_state_dict(snake_case__ , snake_case__ ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: A = ( F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Saving Optimizer state to {output_optimizer_file}' ) torch.save(snake_case__ , snake_case__ ) logger.info(F'Optimizer state saved in {output_optimizer_file}' ) else: A = os.path.join(snake_case__ , F'{OPTIMIZER_NAME}_{optimizer_index}' ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) logger.info(F'Saving Optimizer state to {ckpt_dir}' ) dist_cp.save_state_dict( state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(snake_case__ ) , planner=DefaultSavePlanner() , ) logger.info(F'Optimizer state saved in {ckpt_dir}' ) def _snake_case ( snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Optional[int]=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: A = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: A = ( F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Loading Optimizer state from {input_optimizer_file}' ) A = torch.load(snake_case__ ) logger.info(F'Optimizer state loaded from {input_optimizer_file}' ) else: A = ( os.path.join(snake_case__ , F'{OPTIMIZER_NAME}_{optimizer_index}' ) if F'{OPTIMIZER_NAME}' not in input_dir else input_dir ) logger.info(F'Loading Optimizer from {ckpt_dir}' ) A = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(snake_case__ ) , ) A = optim_state['optimizer'] logger.info(F'Optimizer loaded from {ckpt_dir}' ) A = FSDP.optim_state_dict_to_load(snake_case__ , snake_case__ , snake_case__ ) optimizer.load_state_dict(snake_case__ )
91
0
'''simple docstring''' from typing import Any def lowercase__( __UpperCamelCase: list ): """simple docstring""" if not input_list: return [] SCREAMING_SNAKE_CASE : Union[str, Any] = [input_list.count(__UpperCamelCase ) for value in input_list] SCREAMING_SNAKE_CASE : Any = max(__UpperCamelCase ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(__UpperCamelCase ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
28
"""simple docstring""" import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechTaHifiGan, SpeechTaHifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: str = AudioLDMPipeline _lowerCamelCase: Optional[int] = TEXT_TO_AUDIO_PARAMS _lowerCamelCase: Optional[int] = TEXT_TO_AUDIO_BATCH_PARAMS _lowerCamelCase: Optional[int] = frozenset( [ '''num_inference_steps''', '''num_waveforms_per_prompt''', '''generator''', '''latents''', '''output_type''', '''return_dict''', '''callback''', '''callback_steps''', ] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: torch.manual_seed(0 ) A = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=(32, 64) ,class_embed_type='simple_projection' ,projection_class_embeddings_input_dim=32 ,class_embeddings_concat=A_ ,) A = DDIMScheduler( beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule='scaled_linear' ,clip_sample=A_ ,set_alpha_to_one=A_ ,) torch.manual_seed(0 ) A = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=1 ,out_channels=1 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,) torch.manual_seed(0 ) A = ClapTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,projection_dim=32 ,) A = ClapTextModelWithProjection(A_ ) A = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' ,model_max_length=77 ) A = SpeechTaHifiGanConfig( model_in_dim=8 ,sampling_rate=1_6000 ,upsample_initial_channel=16 ,upsample_rates=[2, 2] ,upsample_kernel_sizes=[4, 4] ,resblock_kernel_sizes=[3, 7] ,resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] ,normalize_before=A_ ,) A = SpeechTaHifiGan(A_ ) A = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'vocoder': vocoder, } return components def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Any ,A_ : Dict=0 ) -> str: if str(A_ ).startswith('mps' ): A = torch.manual_seed(A_ ) else: A = torch.Generator(device=A_ ).manual_seed(A_ ) A = { 'prompt': 'A hammer hitting a wooden surface', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, } return inputs def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = audioldm_pipe(**A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) == 256 A = audio[:10] A = np.array( [-0.00_50, 0.00_50, -0.00_60, 0.00_33, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_33] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = 3 * [inputs['prompt']] # forward A = audioldm_pipe(**A_ ) A = output.audios[0] A = self.get_dummy_inputs(A_ ) A = 3 * [inputs.pop('prompt' )] A = audioldm_pipe.tokenizer( A_ ,padding='max_length' ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=A_ ,return_tensors='pt' ,) A = text_inputs['input_ids'].to(A_ ) A = audioldm_pipe.text_encoder( A_ ,) A = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state A = F.normalize(A_ ,dim=-1 ) A = prompt_embeds # forward A = audioldm_pipe(**A_ ) A = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any: A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = 3 * ['this is a negative prompt'] A = negative_prompt A = 3 * [inputs['prompt']] # forward A = audioldm_pipe(**A_ ) A = output.audios[0] A = self.get_dummy_inputs(A_ ) A = 3 * [inputs.pop('prompt' )] A = [] for p in [prompt, negative_prompt]: A = audioldm_pipe.tokenizer( A_ ,padding='max_length' ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=A_ ,return_tensors='pt' ,) A = text_inputs['input_ids'].to(A_ ) A = audioldm_pipe.text_encoder( A_ ,) A = text_embeds.text_embeds # additional L_2 normalization over each hidden-state A = F.normalize(A_ ,dim=-1 ) embeds.append(A_ ) A , A = embeds # forward A = audioldm_pipe(**A_ ) A = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : str ) -> int: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = PNDMScheduler(skip_prk_steps=A_ ) A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = 'egg cracking' A = audioldm_pipe(**A_ ,negative_prompt=A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) == 256 A = audio[:10] A = np.array( [-0.00_51, 0.00_50, -0.00_60, 0.00_34, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_32] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = PNDMScheduler(skip_prk_steps=A_ ) A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = 'A hammer hitting a wooden surface' # test num_waveforms_per_prompt=1 (default) A = audioldm_pipe(A_ ,num_inference_steps=2 ).audios assert audios.shape == (1, 256) # test num_waveforms_per_prompt=1 (default) for batch of prompts A = 2 A = audioldm_pipe([prompt] * batch_size ,num_inference_steps=2 ).audios assert audios.shape == (batch_size, 256) # test num_waveforms_per_prompt for single prompt A = 2 A = audioldm_pipe(A_ ,num_inference_steps=2 ,num_waveforms_per_prompt=A_ ).audios assert audios.shape == (num_waveforms_per_prompt, 256) # test num_waveforms_per_prompt for batch of prompts A = 2 A = audioldm_pipe( [prompt] * batch_size ,num_inference_steps=2 ,num_waveforms_per_prompt=A_ ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = audioldm_pipe.vocoder.config.sampling_rate A = self.get_dummy_inputs(A_ ) A = audioldm_pipe(audio_length_in_s=0.0_16 ,**A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) / vocoder_sampling_rate == 0.0_16 A = audioldm_pipe(audio_length_in_s=0.0_32 ,**A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) / vocoder_sampling_rate == 0.0_32 def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]: A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = ['hey'] A = audioldm_pipe(A_ ,num_inference_steps=1 ) A = output.audios.shape assert audio_shape == (1, 256) A = audioldm_pipe.vocoder.config config.model_in_dim *= 2 A = SpeechTaHifiGan(A_ ).to(A_ ) A = audioldm_pipe(A_ ,num_inference_steps=1 ) A = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 256) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: self._test_attention_slicing_forward_pass(test_mean_pixel_difference=A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: self._test_inference_batch_single_identical(test_mean_pixel_difference=A_ ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,) def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A_ ) @slow class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]: super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : List[Any] ,A_ : str="cpu" ,A_ : List[str]=torch.floataa ,A_ : str=0 ) -> List[Any]: A = torch.Generator(device=A_ ).manual_seed(A_ ) A = np.random.RandomState(A_ ).standard_normal((1, 8, 128, 16) ) A = torch.from_numpy(A_ ).to(device=A_ ,dtype=A_ ) A = { 'prompt': 'A hammer hitting a wooden surface', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 2.5, } return inputs def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: A = AudioLDMPipeline.from_pretrained('cvssp/audioldm' ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_inputs(A_ ) A = 25 A = audioldm_pipe(**A_ ).audios[0] assert audio.ndim == 1 assert len(A_ ) == 8_1920 A = audio[7_7230:7_7240] A = np.array( [-0.48_84, -0.46_07, 0.00_23, 0.50_07, 0.58_96, 0.51_51, 0.38_13, -0.02_08, -0.36_87, -0.43_15] ) A = np.abs(expected_slice - audio_slice ).max() assert max_diff < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]: A = AudioLDMPipeline.from_pretrained('cvssp/audioldm' ) A = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_inputs(A_ ) A = audioldm_pipe(**A_ ).audios[0] assert audio.ndim == 1 assert len(A_ ) == 8_1920 A = audio[2_7780:2_7790] A = np.array([-0.21_31, -0.08_73, -0.01_24, -0.01_89, 0.05_69, 0.13_73, 0.18_83, 0.28_86, 0.32_97, 0.22_12] ) A = np.abs(expected_slice - audio_slice ).max() assert max_diff < 3e-2
91
0
"""simple docstring""" from __future__ import annotations def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): if days_between_payments <= 0: raise ValueError('''days_between_payments must be > 0''' ) if daily_interest_rate < 0: raise ValueError('''daily_interest_rate must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return principal * daily_interest_rate * days_between_payments def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,): if number_of_compounding_periods <= 0: raise ValueError('''number_of_compounding_periods must be > 0''' ) if nominal_annual_interest_rate_percentage < 0: raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return principal * ( (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods - 1 ) def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,): if number_of_years <= 0: raise ValueError('''number_of_years must be > 0''' ) if nominal_annual_percentage_rate < 0: raise ValueError('''nominal_annual_percentage_rate must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return compound_interest( lowerCAmelCase__ ,nominal_annual_percentage_rate / 365 ,number_of_years * 365 ) if __name__ == "__main__": import doctest doctest.testmod()
29
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase = { '''configuration_xlm_roberta_xl''': [ '''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMRobertaXLConfig''', '''XLMRobertaXLOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XLMRobertaXLForCausalLM''', '''XLMRobertaXLForMaskedLM''', '''XLMRobertaXLForMultipleChoice''', '''XLMRobertaXLForQuestionAnswering''', '''XLMRobertaXLForSequenceClassification''', '''XLMRobertaXLForTokenClassification''', '''XLMRobertaXLModel''', '''XLMRobertaXLPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaXLConfig, XLMRobertaXLOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaXLForCausalLM, XLMRobertaXLForMaskedLM, XLMRobertaXLForMultipleChoice, XLMRobertaXLForQuestionAnswering, XLMRobertaXLForSequenceClassification, XLMRobertaXLForTokenClassification, XLMRobertaXLModel, XLMRobertaXLPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
91
0
import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=True , _lowercase="pt" ): '''simple docstring''' UpperCAmelCase_ : Dict = {'''add_prefix_space''': True} if isinstance(_lowercase , _lowercase ) and not line.startswith(''' ''' ) else {} UpperCAmelCase_ : List[str] = padding_side return tokenizer( [line] , max_length=_lowercase , padding='''max_length''' if pad_to_max_length else None , truncation=_lowercase , return_tensors=_lowercase , add_special_tokens=_lowercase , **_lowercase , ) def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase=None , ): '''simple docstring''' UpperCAmelCase_ : str = input_ids.ne(_lowercase ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class __a( _a ): """simple docstring""" def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE="train" ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE="" ,) -> Tuple: super().__init__() UpperCAmelCase_ : Optional[Any] = Path(_SCREAMING_SNAKE_CASE ).joinpath(type_path + '''.source''' ) UpperCAmelCase_ : List[Any] = Path(_SCREAMING_SNAKE_CASE ).joinpath(type_path + '''.target''' ) UpperCAmelCase_ : Tuple = self.get_char_lens(self.src_file ) UpperCAmelCase_ : Any = max_source_length UpperCAmelCase_ : str = max_target_length assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}''' UpperCAmelCase_ : str = tokenizer UpperCAmelCase_ : Union[str, Any] = prefix if n_obs is not None: UpperCAmelCase_ : Dict = self.src_lens[:n_obs] UpperCAmelCase_ : Union[str, Any] = src_lang UpperCAmelCase_ : Any = tgt_lang def __len__( self ) -> Any: return len(self.src_lens ) def __getitem__( self ,_SCREAMING_SNAKE_CASE ) -> Dict[str, torch.Tensor]: UpperCAmelCase_ : Any = index + 1 # linecache starts at 1 UpperCAmelCase_ : int = self.prefix + linecache.getline(str(self.src_file ) ,_SCREAMING_SNAKE_CASE ).rstrip('''\n''' ) UpperCAmelCase_ : int = linecache.getline(str(self.tgt_file ) ,_SCREAMING_SNAKE_CASE ).rstrip('''\n''' ) assert source_line, f'''empty source line for index {index}''' assert tgt_line, f'''empty tgt line for index {index}''' # Need to add eos token manually for T5 if isinstance(self.tokenizer ,_SCREAMING_SNAKE_CASE ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right UpperCAmelCase_ : Tuple = ( self.tokenizer.question_encoder if isinstance(self.tokenizer ,_SCREAMING_SNAKE_CASE ) else self.tokenizer ) UpperCAmelCase_ : Dict = self.tokenizer.generator if isinstance(self.tokenizer ,_SCREAMING_SNAKE_CASE ) else self.tokenizer UpperCAmelCase_ : Tuple = encode_line(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,self.max_source_length ,'''right''' ) UpperCAmelCase_ : Tuple = encode_line(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,self.max_target_length ,'''right''' ) UpperCAmelCase_ : Optional[Any] = source_inputs['''input_ids'''].squeeze() UpperCAmelCase_ : Optional[Any] = target_inputs['''input_ids'''].squeeze() UpperCAmelCase_ : Tuple = source_inputs['''attention_mask'''].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def a__ ( _SCREAMING_SNAKE_CASE ) -> Optional[int]: return [len(_SCREAMING_SNAKE_CASE ) for x in Path(_SCREAMING_SNAKE_CASE ).open().readlines()] def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Dict[str, torch.Tensor]: UpperCAmelCase_ : int = torch.stack([x['''input_ids'''] for x in batch] ) UpperCAmelCase_ : Dict = torch.stack([x['''attention_mask'''] for x in batch] ) UpperCAmelCase_ : Tuple = torch.stack([x['''decoder_input_ids'''] for x in batch] ) UpperCAmelCase_ : int = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer ,_SCREAMING_SNAKE_CASE ) else self.tokenizer.pad_token_id ) UpperCAmelCase_ : Tuple = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer ,_SCREAMING_SNAKE_CASE ) else self.tokenizer.pad_token_id ) UpperCAmelCase_ : str = trim_batch(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) UpperCAmelCase_, UpperCAmelCase_ : Union[str, Any] = trim_batch(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = { '''input_ids''': source_ids, '''attention_mask''': source_mask, '''decoder_input_ids''': y, } return batch __a = getLogger(__name__) def lowerCamelCase__ ( _lowercase ): '''simple docstring''' return list(itertools.chain.from_iterable(_lowercase ) ) def lowerCamelCase__ ( _lowercase ): '''simple docstring''' UpperCAmelCase_ : str = get_git_info() save_json(_lowercase , os.path.join(_lowercase , '''git_log.json''' ) ) def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase=4 , **_lowercase ): '''simple docstring''' with open(_lowercase , '''w''' ) as f: json.dump(_lowercase , _lowercase , indent=_lowercase , **_lowercase ) def lowerCamelCase__ ( _lowercase ): '''simple docstring''' with open(_lowercase ) as f: return json.load(_lowercase ) def lowerCamelCase__ ( ): '''simple docstring''' UpperCAmelCase_ : List[Any] = git.Repo(search_parent_directories=_lowercase ) UpperCAmelCase_ : Union[str, Any] = { '''repo_id''': str(_lowercase ), '''repo_sha''': str(repo.head.object.hexsha ), '''repo_branch''': str(repo.active_branch ), '''hostname''': str(socket.gethostname() ), } return repo_infos def lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' return list(map(_lowercase , _lowercase ) ) def lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' with open(_lowercase , '''wb''' ) as f: return pickle.dump(_lowercase , _lowercase ) def lowerCamelCase__ ( _lowercase ): '''simple docstring''' def remove_articles(_lowercase ): return re.sub(r'''\b(a|an|the)\b''' , ''' ''' , _lowercase ) def white_space_fix(_lowercase ): return " ".join(text.split() ) def remove_punc(_lowercase ): UpperCAmelCase_ : Any = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_lowercase ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_lowercase ) ) ) ) def lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' UpperCAmelCase_ : Any = normalize_answer(_lowercase ).split() UpperCAmelCase_ : List[Any] = normalize_answer(_lowercase ).split() UpperCAmelCase_ : Any = Counter(_lowercase ) & Counter(_lowercase ) UpperCAmelCase_ : Tuple = sum(common.values() ) if num_same == 0: return 0 UpperCAmelCase_ : str = 1.0 * num_same / len(_lowercase ) UpperCAmelCase_ : Union[str, Any] = 1.0 * num_same / len(_lowercase ) UpperCAmelCase_ : Any = (2 * precision * recall) / (precision + recall) return fa def lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' return normalize_answer(_lowercase ) == normalize_answer(_lowercase ) def lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' assert len(_lowercase ) == len(_lowercase ) UpperCAmelCase_ : Optional[int] = 0 for hypo, pred in zip(_lowercase , _lowercase ): em += exact_match_score(_lowercase , _lowercase ) if len(_lowercase ) > 0: em /= len(_lowercase ) return {"em": em} def lowerCamelCase__ ( _lowercase ): '''simple docstring''' return model_prefix.startswith('''rag''' ) def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead UpperCAmelCase_ : Dict = '''dropout_rate''' for p in extra_params: if getattr(_lowercase , _lowercase , _lowercase ): if not hasattr(_lowercase , _lowercase ) and not hasattr(_lowercase , equivalent_param[p] ): logger.info('''config doesn\'t have a `{}` attribute'''.format(_lowercase ) ) delattr(_lowercase , _lowercase ) continue UpperCAmelCase_ : Union[str, Any] = p if hasattr(_lowercase , _lowercase ) else equivalent_param[p] setattr(_lowercase , _lowercase , getattr(_lowercase , _lowercase ) ) delattr(_lowercase , _lowercase ) return hparams, config
30
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.utils import ComputeEnvironment from .cluster import get_cluster_input from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 from .sagemaker import get_sagemaker_input _lowercase = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine''' def _snake_case ( ): A = _ask_options( 'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , ) if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: A = get_sagemaker_input() else: A = get_cluster_input() return config def _snake_case ( snake_case__ : Any=None ): if subparsers is not None: A = subparsers.add_parser('config' , description=snake_case__ ) else: A = argparse.ArgumentParser('Accelerate config command' , description=snake_case__ ) parser.add_argument( '--config_file' , default=snake_case__ , help=( 'The path to use to store the config file. Will default to a file named default_config.yaml in the cache ' 'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ' 'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ' 'with \'huggingface\'.' ) , ) if subparsers is not None: parser.set_defaults(func=snake_case__ ) return parser def _snake_case ( snake_case__ : Tuple ): A = get_user_input() if args.config_file is not None: A = args.config_file else: if not os.path.isdir(snake_case__ ): os.makedirs(snake_case__ ) A = default_yaml_config_file if config_file.endswith('.json' ): config.to_json_file(snake_case__ ) else: config.to_yaml_file(snake_case__ ) print(F'accelerate configuration saved at {config_file}' ) def _snake_case ( ): A = config_command_parser() A = parser.parse_args() config_command(snake_case__ ) if __name__ == "__main__": main()
91
0
from __future__ import annotations from collections import namedtuple def UpperCAmelCase_ ( __UpperCAmelCase : float , __UpperCAmelCase : float , __UpperCAmelCase : float ) -> tuple: SCREAMING_SNAKE_CASE_ = namedtuple('result' , 'name value' ) if (voltage, current, power).count(0 ) != 1: raise ValueError('Only one argument must be 0' ) elif power < 0: raise ValueError( 'Power cannot be negative in any electrical/electronics system' ) elif voltage == 0: return result('voltage' , power / current ) elif current == 0: return result('current' , power / voltage ) elif power == 0: return result('power' , float(round(abs(voltage * current ) , 2 ) ) ) else: raise ValueError('Exactly one argument must be 0' ) if __name__ == "__main__": import doctest doctest.testmod()
31
"""simple docstring""" import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Tuple ,A_ : Any ,A_ : int=13 ,A_ : str=7 ,A_ : Tuple=True ,A_ : str=True ,A_ : str=False ,A_ : List[str]=True ,A_ : str=99 ,A_ : str=32 ,A_ : Optional[int]=5 ,A_ : Optional[Any]=4 ,A_ : str=37 ,A_ : Optional[Any]="gelu" ,A_ : Union[str, Any]=0.1 ,A_ : Any=0.1 ,A_ : Optional[Any]=512 ,A_ : str=16 ,A_ : int=2 ,A_ : Optional[Any]=0.02 ,A_ : str=3 ,A_ : str=4 ,A_ : List[str]=None ,) -> str: A = parent A = batch_size A = seq_length A = is_training A = use_input_mask A = use_token_type_ids A = use_labels A = vocab_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_vocab_size A = type_sequence_label_size A = initializer_range A = num_labels A = num_choices A = scope def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A = None if self.use_input_mask: A = random_attention_mask([self.batch_size, self.seq_length] ) A = None if self.use_token_type_ids: A = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) A = None A = None A = None if self.use_labels: A = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A = ids_tensor([self.batch_size] ,self.num_choices ) A = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: return LlamaConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=A_ ,initializer_range=self.initializer_range ,) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Dict ,A_ : Optional[int] ,A_ : Any ,A_ : Optional[Any] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Tuple ) -> List[Any]: A = LlamaModel(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ) A = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : int ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Any ,A_ : Tuple ,A_ : Union[str, Any] ,A_ : Dict ,) -> List[str]: A = True A = LlamaModel(A_ ) model.to(A_ ) model.eval() A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,) A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,) A = model(A_ ,attention_mask=A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : List[str] ,A_ : Optional[int] ,A_ : Any ,A_ : str ,A_ : Dict ,A_ : Dict ,A_ : Tuple ,A_ : Tuple ,A_ : Dict ,) -> Union[str, Any]: A = LlamaForCausalLM(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Dict ,A_ : Any ,A_ : int ,A_ : List[str] ,A_ : Tuple ,A_ : Any ,A_ : Union[str, Any] ,A_ : Any ,A_ : int ,) -> List[Any]: A = True A = True A = LlamaForCausalLM(config=A_ ) model.to(A_ ) model.eval() # first forward pass A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,use_cache=A_ ,) A = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A = ids_tensor((self.batch_size, 3) ,config.vocab_size ) A = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and A = torch.cat([input_ids, next_tokens] ,dim=-1 ) A = torch.cat([input_mask, next_mask] ,dim=-1 ) A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,output_hidden_states=A_ ,)['hidden_states'][0] A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,past_key_values=A_ ,output_hidden_states=A_ ,)['hidden_states'][0] # select random slice A = ids_tensor((1,) ,output_from_past.shape[-1] ).item() A = output_from_no_past[:, -3:, random_slice_idx].detach() A = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A_ ,A_ ,atol=1e-3 ) ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]: A = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = config_and_inputs A = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Dict = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () _lowerCamelCase: List[Any] = (LlamaForCausalLM,) if is_torch_available() else () _lowerCamelCase: Any = ( { '''feature-extraction''': LlamaModel, '''text-classification''': LlamaForSequenceClassification, '''text-generation''': LlamaForCausalLM, '''zero-shot''': LlamaForSequenceClassification, } if is_torch_available() else {} ) _lowerCamelCase: int = False _lowerCamelCase: List[str] = False def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: A = LlamaModelTester(self ) A = ConfigTester(self ,config_class=A_ ,hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]: A = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: A = type self.model_tester.create_and_check_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = 3 A = input_dict['input_ids'] A = input_ids.ne(1 ).to(A_ ) A = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) A = LlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = 3 A = 'single_label_classification' A = input_dict['input_ids'] A = input_ids.ne(1 ).to(A_ ) A = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) A = LlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = 3 A = 'multi_label_classification' A = input_dict['input_ids'] A = input_ids.ne(1 ).to(A_ ) A = ids_tensor( [self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float ) A = LlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('LLaMA buffers include complex numbers, which breaks this test' ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: pass @parameterized.expand([('linear',), ('dynamic',)] ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Any ) -> str: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = ids_tensor([1, 10] ,config.vocab_size ) A = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights A = LlamaModel(A_ ) original_model.to(A_ ) original_model.eval() A = original_model(A_ ).last_hidden_state A = original_model(A_ ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights A = {'type': scaling_type, 'factor': 10.0} A = LlamaModel(A_ ) scaled_model.to(A_ ) scaled_model.eval() A = scaled_model(A_ ).last_hidden_state A = scaled_model(A_ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(A_ ,A_ ,atol=1e-5 ) ) else: self.assertFalse(torch.allclose(A_ ,A_ ,atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(A_ ,A_ ,atol=1e-5 ) ) @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: A = [1, 306, 4658, 278, 6593, 310, 2834, 338] A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' ,device_map='auto' ) A = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 A = torch.tensor([[-6.65_50, -4.12_27, -4.98_59, -3.24_06, 0.82_62, -3.00_33, 1.29_64, -3.36_99]] ) torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A = torch.tensor([-12.82_81, -7.44_53, -0.46_39, -8.06_25, -7.25_00, -8.00_00, -6.48_83, -7.76_95, -7.84_38, -7.03_12, -6.21_88, -7.13_28, -1.84_96, 1.99_61, -8.62_50, -6.72_27, -12.82_81, -6.94_92, -7.07_42, -7.78_52, -7.58_20, -7.90_62, -6.93_75, -7.98_05, -8.34_38, -8.15_62, -8.04_69, -7.62_50, -7.74_22, -7.33_98,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,A_ ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def _SCREAMING_SNAKE_CASE ( self : Any ) -> int: A = [1, 306, 4658, 278, 6593, 310, 2834, 338] A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' ,device_map='auto' ) A = model(torch.tensor(A_ ) ) # Expected mean on dim = -1 A = torch.tensor([[-2.06_22, -1.27_94, -1.16_38, -0.97_88, -1.46_03, -1.02_38, -1.78_93, -1.44_11]] ) torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A = torch.tensor([-8.14_06, -8.05_47, 2.74_61, -1.23_44, -0.14_48, -1.82_62, -1.00_20, -1.81_54, -1.68_95, -1.85_16, -2.35_74, -0.92_77, 3.75_98, 6.57_42, -1.29_98, -0.11_77, -8.14_06, -2.96_88, -2.91_99, -3.16_99, -3.52_54, -2.35_55, -2.79_88, -3.41_41, -2.82_62, -4.51_95, -3.33_79, -3.31_64, -2.78_32, -3.02_73] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,A_ ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def _SCREAMING_SNAKE_CASE ( self : int ) -> str: A = [1, 306, 4658, 278, 6593, 310, 2834, 338] A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ,device_map='auto' ) A = model(torch.tensor(A_ ) ) # Expected mean on dim = -1 A = torch.tensor([[-0.85_62, -1.85_20, -0.75_51, -0.41_62, -1.51_61, -1.20_38, -2.48_23, -2.32_54]] ) torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A = torch.tensor([-2.22_27, 4.88_28, 0.90_23, -0.45_78, -0.78_71, -0.10_33, -0.62_21, -0.57_86, -0.78_03, -1.06_74, -1.29_20, -0.15_70, 0.80_08, 2.07_23, -0.94_97, 0.27_71, -2.22_27, -0.76_12, -1.43_46, -1.20_61, -1.64_26, -0.30_00, -0.71_39, -1.19_34, -1.86_91, -1.69_73, -1.59_47, -1.27_05, -0.35_23, -0.55_13] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) @unittest.skip( 'Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test' ) @slow def _SCREAMING_SNAKE_CASE ( self : Any ) -> str: A = [1, 306, 4658, 278, 6593, 310, 2834, 338] A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' ,device_map='auto' ) A = model(torch.tensor(A_ ) ) A = torch.tensor( [[-4.23_27, -3.33_60, -4.66_65, -4.76_31, -1.81_80, -3.41_70, -1.42_11, -3.18_10]] ,dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) # fmt: off A = torch.tensor([-9.49_22, -3.95_51, 1.79_98, -5.67_58, -5.10_55, -5.89_84, -4.83_20, -6.80_86, -6.53_91, -5.61_72, -5.58_20, -5.53_52, 1.78_81, 3.62_89, -6.51_17, -3.47_85, -9.50_00, -6.03_52, -6.81_25, -6.01_95, -6.68_36, -5.47_27, -6.28_12, -6.03_91, -7.33_98, -7.42_97, -7.48_44, -6.58_20, -5.87_89, -5.53_12] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,A_ ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip('Model is curently gated' ) @slow def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple: A = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi' A = 'Simply put, the theory of relativity states that ' A = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ) A = tokenizer.encode(A_ ,return_tensors='pt' ) A = LlamaForCausalLM.from_pretrained( 'meta-llama/Llama-2-13b-chat-hf' ,device_map='sequential' ,use_safetensors=A_ ) # greedy generation outputs A = model.generate(A_ ,max_new_tokens=64 ,top_p=A_ ,temperature=1 ,do_sample=A_ ) A = tokenizer.decode(generated_ids[0] ,skip_special_tokens=A_ ) self.assertEqual(A_ ,A_ )
91
0
import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class __UpperCamelCase ( unittest.TestCase ): def UpperCamelCase( self ): _UpperCAmelCase = tempfile.mkdtemp() # fmt: off _UpperCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest'''] # fmt: on _UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) _UpperCAmelCase = { '''do_resize''': True, '''size''': {'''height''': 18, '''width''': 18}, '''do_normalize''': True, '''image_mean''': [0.5, 0.5, 0.5], '''image_std''': [0.5, 0.5, 0.5], } _UpperCAmelCase = os.path.join(self.tmpdirname , _UpperCamelCase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(_UpperCamelCase , _UpperCamelCase ) def UpperCamelCase( self , **_UpperCamelCase ): return BertTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase ) def UpperCamelCase( self , **_UpperCamelCase ): return ViTImageProcessor.from_pretrained(self.tmpdirname , **_UpperCamelCase ) def UpperCamelCase( self ): shutil.rmtree(self.tmpdirname ) def UpperCamelCase( self ): _UpperCAmelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _UpperCAmelCase = [Image.fromarray(np.moveaxis(_UpperCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def UpperCamelCase( self ): _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase ) processor.save_pretrained(self.tmpdirname ) _UpperCAmelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , _UpperCamelCase ) def UpperCamelCase( self ): _UpperCAmelCase = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _UpperCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _UpperCAmelCase = self.get_image_processor(do_normalize=_UpperCamelCase , padding_value=1.0 ) _UpperCAmelCase = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_UpperCamelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _UpperCamelCase ) def UpperCamelCase( self ): _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase ) _UpperCAmelCase = self.prepare_image_inputs() _UpperCAmelCase = image_processor(_UpperCamelCase , return_tensors='''np''' ) _UpperCAmelCase = processor(images=_UpperCamelCase , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def UpperCamelCase( self ): _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase ) _UpperCAmelCase = '''lower newer''' _UpperCAmelCase = processor(text=_UpperCamelCase ) _UpperCAmelCase = tokenizer(_UpperCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def UpperCamelCase( self ): _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase ) _UpperCAmelCase = '''lower newer''' _UpperCAmelCase = self.prepare_image_inputs() _UpperCAmelCase = processor(text=_UpperCamelCase , images=_UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with self.assertRaises(_UpperCamelCase ): processor() def UpperCamelCase( self ): _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase ) _UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _UpperCAmelCase = processor.batch_decode(_UpperCamelCase ) _UpperCAmelCase = tokenizer.batch_decode(_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , _UpperCamelCase ) def UpperCamelCase( self ): _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase ) _UpperCAmelCase = '''lower newer''' _UpperCAmelCase = self.prepare_image_inputs() _UpperCAmelCase = processor(text=_UpperCamelCase , images=_UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
32
"""simple docstring""" import os # Precomputes a list of the 100 first triangular numbers _lowercase = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)] def _snake_case ( ): A = os.path.dirname(os.path.realpath(snake_case__ ) ) A = os.path.join(snake_case__ , 'words.txt' ) A = '' with open(snake_case__ ) as f: A = f.readline() A = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )] A = [ word for word in [sum(ord(snake_case__ ) - 64 for x in word ) for word in words] if word in TRIANGULAR_NUMBERS ] return len(snake_case__ ) if __name__ == "__main__": print(solution())
91
0
import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Optional[Any]: snake_case__ = args.pruning_method snake_case__ = args.threshold snake_case__ = args.model_name_or_path.rstrip('''/''' ) snake_case__ = args.target_model_path print(F"""Load fine-pruned model from {model_name_or_path}""" ) snake_case__ = torch.load(os.path.join(__lowerCAmelCase , '''pytorch_model.bin''' ) ) snake_case__ = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: snake_case__ = tensor print(F"""Copied layer {name}""" ) elif "classifier" in name or "qa_output" in name: snake_case__ = tensor print(F"""Copied layer {name}""" ) elif "bias" in name: snake_case__ = tensor print(F"""Copied layer {name}""" ) else: if pruning_method == "magnitude": snake_case__ = MagnitudeBinarizer.apply(inputs=__lowerCAmelCase , threshold=__lowerCAmelCase ) snake_case__ = tensor * mask print(F"""Pruned layer {name}""" ) elif pruning_method == "topK": if "mask_scores" in name: continue snake_case__ = name[:-6] snake_case__ = model[F"""{prefix_}mask_scores"""] snake_case__ = TopKBinarizer.apply(__lowerCAmelCase , __lowerCAmelCase ) snake_case__ = tensor * mask print(F"""Pruned layer {name}""" ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue snake_case__ = name[:-6] snake_case__ = model[F"""{prefix_}mask_scores"""] snake_case__ = ThresholdBinarizer.apply(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) snake_case__ = tensor * mask print(F"""Pruned layer {name}""" ) elif pruning_method == "l0": if "mask_scores" in name: continue snake_case__ = name[:-6] snake_case__ = model[F"""{prefix_}mask_scores"""] snake_case__ , snake_case__ = -0.1, 1.1 snake_case__ = torch.sigmoid(__lowerCAmelCase ) snake_case__ = s * (r - l) + l snake_case__ = s_bar.clamp(min=0.0 , max=1.0 ) snake_case__ = tensor * mask print(F"""Pruned layer {name}""" ) else: raise ValueError('''Unknown pruning method''' ) if target_model_path is None: snake_case__ = os.path.join( os.path.dirname(__lowerCAmelCase ) , F"""bertarized_{os.path.basename(__lowerCAmelCase )}""" ) if not os.path.isdir(__lowerCAmelCase ): shutil.copytree(__lowerCAmelCase , __lowerCAmelCase ) print(F"""\nCreated folder {target_model_path}""" ) torch.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , '''pytorch_model.bin''' ) ) print('''\nPruned model saved! See you later!''' ) if __name__ == "__main__": lowerCamelCase__ : int = argparse.ArgumentParser() parser.add_argument( """--pruning_method""", choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""], type=str, required=True, help=( """Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,""" """ sigmoied_threshold = Soft movement pruning)""" ), ) parser.add_argument( """--threshold""", type=float, required=False, help=( """For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.""" """For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.""" """Not needed for `l0`""" ), ) parser.add_argument( """--model_name_or_path""", type=str, required=True, help="""Folder containing the model that was previously fine-pruned""", ) parser.add_argument( """--target_model_path""", default=None, type=str, required=False, help="""Folder containing the model that was previously fine-pruned""", ) lowerCamelCase__ : Optional[int] = parser.parse_args() main(args)
33
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''', '''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = '''mobilenet_v1''' def __init__( self : Optional[int] ,A_ : Optional[int]=3 ,A_ : Any=224 ,A_ : List[Any]=1.0 ,A_ : Union[str, Any]=8 ,A_ : Union[str, Any]="relu6" ,A_ : Optional[Any]=True ,A_ : List[str]=0.9_99 ,A_ : int=0.02 ,A_ : int=0.0_01 ,**A_ : Union[str, Any] ,) -> Dict: super().__init__(**A_ ) if depth_multiplier <= 0: raise ValueError('depth_multiplier must be greater than zero.' ) A = num_channels A = image_size A = depth_multiplier A = min_depth A = hidden_act A = tf_padding A = classifier_dropout_prob A = initializer_range A = layer_norm_eps class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[int] = version.parse('''1.11''' ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict([('pixel_values', {0: 'batch'})] ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: if self.task == "image-classification": return OrderedDict([('logits', {0: 'batch'})] ) else: return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> float: return 1e-4
91
0
"""simple docstring""" import json import os import tempfile from transformers.testing_utils import check_json_file_has_correct_format class snake_case_ : """simple docstring""" A_ = None def UpperCAmelCase__ ( self) -> Optional[Any]: UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict) UpperCamelCase = json.loads(feat_extract.to_json_string()) for key, value in self.feat_extract_dict.items(): self.assertEqual(obj[key] , lowerCamelCase_) def UpperCAmelCase__ ( self) -> int: UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase = os.path.join(lowerCamelCase_ , '''feat_extract.json''') feat_extract_first.to_json_file(lowerCamelCase_) UpperCamelCase = self.feature_extraction_class.from_json_file(lowerCamelCase_) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict()) def UpperCAmelCase__ ( self) -> int: UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase = feat_extract_first.save_pretrained(lowerCamelCase_)[0] check_json_file_has_correct_format(lowerCamelCase_) UpperCamelCase = self.feature_extraction_class.from_pretrained(lowerCamelCase_) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict()) def UpperCAmelCase__ ( self) -> Union[str, Any]: UpperCamelCase = self.feature_extraction_class() self.assertIsNotNone(lowerCamelCase_)
34
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowercase = { '''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''], '''tokenization_electra''': ['''ElectraTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''ElectraTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ElectraForCausalLM''', '''ElectraForMaskedLM''', '''ElectraForMultipleChoice''', '''ElectraForPreTraining''', '''ElectraForQuestionAnswering''', '''ElectraForSequenceClassification''', '''ElectraForTokenClassification''', '''ElectraModel''', '''ElectraPreTrainedModel''', '''load_tf_weights_in_electra''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFElectraForMaskedLM''', '''TFElectraForMultipleChoice''', '''TFElectraForPreTraining''', '''TFElectraForQuestionAnswering''', '''TFElectraForSequenceClassification''', '''TFElectraForTokenClassification''', '''TFElectraModel''', '''TFElectraPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''FlaxElectraForCausalLM''', '''FlaxElectraForMaskedLM''', '''FlaxElectraForMultipleChoice''', '''FlaxElectraForPreTraining''', '''FlaxElectraForQuestionAnswering''', '''FlaxElectraForSequenceClassification''', '''FlaxElectraForTokenClassification''', '''FlaxElectraModel''', '''FlaxElectraPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
91
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) a_ :Union[str, Any] = { 'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'], 'tokenization_roformer': ['RoFormerTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ :Union[str, Any] = ['RoFormerTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ :int = [ 'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'RoFormerForCausalLM', 'RoFormerForMaskedLM', 'RoFormerForMultipleChoice', 'RoFormerForQuestionAnswering', 'RoFormerForSequenceClassification', 'RoFormerForTokenClassification', 'RoFormerLayer', 'RoFormerModel', 'RoFormerPreTrainedModel', 'load_tf_weights_in_roformer', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ :List[Any] = [ 'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFRoFormerForCausalLM', 'TFRoFormerForMaskedLM', 'TFRoFormerForMultipleChoice', 'TFRoFormerForQuestionAnswering', 'TFRoFormerForSequenceClassification', 'TFRoFormerForTokenClassification', 'TFRoFormerLayer', 'TFRoFormerModel', 'TFRoFormerPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ :List[Any] = [ 'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'FlaxRoFormerForMaskedLM', 'FlaxRoFormerForMultipleChoice', 'FlaxRoFormerForQuestionAnswering', 'FlaxRoFormerForSequenceClassification', 'FlaxRoFormerForTokenClassification', 'FlaxRoFormerModel', 'FlaxRoFormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig from .tokenization_roformer import RoFormerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roformer_fast import RoFormerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roformer import ( TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roformer import ( FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, FlaxRoFormerPreTrainedModel, ) else: import sys a_ :Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
35
"""simple docstring""" import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal _lowercase = datasets.utils.logging.get_logger(__name__) _lowercase = ['''names''', '''prefix'''] _lowercase = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols'''] _lowercase = ['''encoding_errors''', '''on_bad_lines'''] _lowercase = ['''date_format'''] @dataclass class lowerCAmelCase_ ( datasets.BuilderConfig ): '''simple docstring''' _lowerCamelCase: str = "," _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[Union[int, List[int], str]] = "infer" _lowerCamelCase: Optional[List[str]] = None _lowerCamelCase: Optional[List[str]] = None _lowerCamelCase: Optional[Union[int, str, List[int], List[str]]] = None _lowerCamelCase: Optional[Union[List[int], List[str]]] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: bool = True _lowerCamelCase: Optional[Literal["c", "python", "pyarrow"]] = None _lowerCamelCase: Dict[Union[int, str], Callable[[Any], Any]] = None _lowerCamelCase: Optional[list] = None _lowerCamelCase: Optional[list] = None _lowerCamelCase: bool = False _lowerCamelCase: Optional[Union[int, List[int]]] = None _lowerCamelCase: Optional[int] = None _lowerCamelCase: Optional[Union[str, List[str]]] = None _lowerCamelCase: bool = True _lowerCamelCase: bool = True _lowerCamelCase: bool = False _lowerCamelCase: bool = True _lowerCamelCase: Optional[str] = None _lowerCamelCase: str = "." _lowerCamelCase: Optional[str] = None _lowerCamelCase: str = '"' _lowerCamelCase: int = 0 _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: bool = True _lowerCamelCase: bool = True _lowerCamelCase: int = 0 _lowerCamelCase: bool = True _lowerCamelCase: bool = False _lowerCamelCase: Optional[str] = None _lowerCamelCase: int = 10000 _lowerCamelCase: Optional[datasets.Features] = None _lowerCamelCase: Optional[str] = "strict" _lowerCamelCase: Literal["error", "warn", "skip"] = "error" _lowerCamelCase: Optional[str] = None def _SCREAMING_SNAKE_CASE ( self : str ) -> Any: if self.delimiter is not None: A = self.delimiter if self.column_names is not None: A = self.column_names @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: A = { 'sep': self.sep, 'header': self.header, 'names': self.names, 'index_col': self.index_col, 'usecols': self.usecols, 'prefix': self.prefix, 'mangle_dupe_cols': self.mangle_dupe_cols, 'engine': self.engine, 'converters': self.converters, 'true_values': self.true_values, 'false_values': self.false_values, 'skipinitialspace': self.skipinitialspace, 'skiprows': self.skiprows, 'nrows': self.nrows, 'na_values': self.na_values, 'keep_default_na': self.keep_default_na, 'na_filter': self.na_filter, 'verbose': self.verbose, 'skip_blank_lines': self.skip_blank_lines, 'thousands': self.thousands, 'decimal': self.decimal, 'lineterminator': self.lineterminator, 'quotechar': self.quotechar, 'quoting': self.quoting, 'escapechar': self.escapechar, 'comment': self.comment, 'encoding': self.encoding, 'dialect': self.dialect, 'error_bad_lines': self.error_bad_lines, 'warn_bad_lines': self.warn_bad_lines, 'skipfooter': self.skipfooter, 'doublequote': self.doublequote, 'memory_map': self.memory_map, 'float_precision': self.float_precision, 'chunksize': self.chunksize, 'encoding_errors': self.encoding_errors, 'on_bad_lines': self.on_bad_lines, 'date_format': self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() ,A_ ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class lowerCAmelCase_ ( datasets.ArrowBasedBuilder ): '''simple docstring''' _lowerCamelCase: Any = CsvConfig def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: return datasets.DatasetInfo(features=self.config.features ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Any ) -> str: if not self.config.data_files: raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' ) A = dl_manager.download_and_extract(self.config.data_files ) if isinstance(A_ ,(str, list, tuple) ): A = data_files if isinstance(A_ ,A_ ): A = [files] A = [dl_manager.iter_files(A_ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={'files': files} )] A = [] for split_name, files in data_files.items(): if isinstance(A_ ,A_ ): A = [files] A = [dl_manager.iter_files(A_ ) for file in files] splits.append(datasets.SplitGenerator(name=A_ ,gen_kwargs={'files': files} ) ) return splits def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : pa.Table ) -> pa.Table: if self.config.features is not None: A = self.config.features.arrow_schema if all(not require_storage_cast(A_ ) for feature in self.config.features.values() ): # cheaper cast A = pa.Table.from_arrays([pa_table[field.name] for field in schema] ,schema=A_ ) else: # more expensive cast; allows str <-> int/float or str to Audio for example A = table_cast(A_ ,A_ ) return pa_table def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[str, Any] ) -> List[Any]: A = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str A = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(A_ ) else object for name, dtype, feature in zip(schema.names ,schema.types ,self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(A_ ) ): A = pd.read_csv(A_ ,iterator=A_ ,dtype=A_ ,**self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(A_ ): A = pa.Table.from_pandas(A_ ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(A_ ) except ValueError as e: logger.error(F'Failed to read file \'{file}\' with error {type(A_ )}: {e}' ) raise
91
0
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #################################################################################################### # # Note: If when running this conversion script you're getting an exception: # ModuleNotFoundError: No module named 'megatron.model.enums' # you need to tell python where to find the clone of Megatron-LM, e.g.: # # cd /tmp # git clone https://github.com/NVIDIA/Megatron-LM # PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ... # # if you already have it cloned elsewhere, simply adjust the path to the existing path # # If the training was done using a Megatron-LM fork, e.g., # https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one # in your path, i.e., /path/to/Megatron-DeepSpeed/ # import argparse import os import re import zipfile import torch from transformers import AutoTokenizer, GPTaConfig def lowercase ( __A : Union[str, Any] , __A : Any , __A : Any=0 ) -> Any: '''simple docstring''' if name is None: snake_case : str = None else: snake_case : int = """.""" * max(0 , spaces - 2 ) + """# {:""" + str(50 - spaces ) + """s}""" snake_case : Tuple = fmt.format(__A ) # Print and recurse (if needed). if isinstance(__A , __A ): if msg is not None: print(__A ) for k in val.keys(): recursive_print(__A , val[k] , spaces + 2 ) elif isinstance(__A , torch.Tensor ): print(__A , """:""" , val.size() ) else: print(__A , """:""" , __A ) def lowercase ( __A : Any , __A : Dict , __A : str , __A : Tuple , __A : Optional[int] ) -> List[str]: '''simple docstring''' snake_case : int = param.size() if checkpoint_version == 1.0: # version 1.0 stores [num_heads * hidden_size * num_splits, :] snake_case : List[str] = (num_heads, hidden_size, num_splits) + input_shape[1:] snake_case : Optional[int] = param.view(*__A ) snake_case : Dict = param.transpose(0 , 2 ) snake_case : int = param.transpose(1 , 2 ).contiguous() elif checkpoint_version >= 2.0: # other versions store [num_heads * num_splits * hidden_size, :] snake_case : Tuple = (num_heads, num_splits, hidden_size) + input_shape[1:] snake_case : Union[str, Any] = param.view(*__A ) snake_case : Dict = param.transpose(0 , 1 ).contiguous() snake_case : Any = param.view(*__A ) return param def lowercase ( __A : str , __A : Dict , __A : str ) -> List[Any]: '''simple docstring''' snake_case : Dict = {} # old versions did not store training args snake_case : int = input_state_dict.get("""args""" , __A ) if ds_args is not None: # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint # from pprint import pprint # pprint(vars(ds_args)) snake_case : Dict = ds_args.padded_vocab_size snake_case : Dict = ds_args.max_position_embeddings snake_case : Optional[Any] = ds_args.hidden_size snake_case : List[str] = ds_args.num_layers snake_case : str = ds_args.num_attention_heads snake_case : Optional[Any] = ds_args.ffn_hidden_size # pprint(config) # The number of heads. snake_case : List[str] = config.n_head # The hidden_size per head. snake_case : List[Any] = config.n_embd // config.n_head # Megatron-LM checkpoint version if "checkpoint_version" in input_state_dict.keys(): snake_case : List[Any] = input_state_dict["""checkpoint_version"""] else: snake_case : int = 0.0 # The model. snake_case : Optional[int] = input_state_dict["""model"""] # The language model. snake_case : Union[str, Any] = model["""language_model"""] # The embeddings. snake_case : List[str] = lm["""embedding"""] # The word embeddings. snake_case : Any = embeddings["""word_embeddings"""]["""weight"""] # Truncate the embedding table to vocab_size rows. snake_case : Union[str, Any] = word_embeddings[: config.vocab_size, :] snake_case : str = word_embeddings # The position embeddings. snake_case : Tuple = embeddings["""position_embeddings"""]["""weight"""] # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size] snake_case : Optional[Any] = pos_embeddings.size(0 ) if n_positions != config.n_positions: raise ValueError( f"""pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match""" ) # Store the position embeddings. snake_case : Tuple = pos_embeddings # The transformer. snake_case : Union[str, Any] = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""] # The regex to extract layer names. snake_case : Optional[int] = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" ) # The simple map of names for "automated" rules. snake_case : str = { """attention.dense""": """.attn.c_proj.""", """self_attention.dense""": """.attn.c_proj.""", """mlp.dense_h_to_4h""": """.mlp.c_fc.""", """mlp.dense_4h_to_h""": """.mlp.c_proj.""", } # Extract the layers. for key, val in transformer.items(): # Match the name. snake_case : Optional[Any] = layer_re.match(__A ) # Stop if that's not a layer if m is None: break # The index of the layer. snake_case : List[str] = int(m.group(1 ) ) # The name of the operation. snake_case : Any = m.group(2 ) # Is it a weight or a bias? snake_case : Any = m.group(3 ) # The name of the layer. snake_case : Tuple = f"""transformer.h.{layer_idx}""" # For layernorm(s), simply store the layer norm. if op_name.endswith("""layernorm""" ): snake_case : Any = """ln_1""" if op_name.startswith("""input""" ) else """ln_2""" snake_case : Any = val # Transpose the QKV matrix. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "weight": # Insert a tensor of 1x1xDxD bias. snake_case : Any = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view( 1 , 1 , __A , __A ) snake_case : List[str] = causal_mask # Insert a "dummy" tensor for masked_bias. snake_case : Optional[int] = torch.tensor(-1E4 , dtype=torch.floataa ) snake_case : Any = masked_bias snake_case : Optional[Any] = fix_query_key_value_ordering(__A , __A , 3 , __A , __A ) # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D. snake_case : int = out_val.transpose(0 , 1 ).contiguous() # Store. snake_case : List[Any] = out_val # Transpose the bias. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "bias": snake_case : List[str] = fix_query_key_value_ordering(__A , __A , 3 , __A , __A ) # Store. No change of shape. snake_case : List[str] = out_val # Transpose the weights. elif weight_or_bias == "weight": snake_case : str = megatron_to_transformers[op_name] snake_case : Tuple = val.transpose(0 , 1 ) # Copy the bias. elif weight_or_bias == "bias": snake_case : List[Any] = megatron_to_transformers[op_name] snake_case : int = val # DEBUG. assert config.n_layer == layer_idx + 1 # The final layernorm. snake_case : List[Any] = transformer["""final_layernorm.weight"""] snake_case : Any = transformer["""final_layernorm.bias"""] # For LM head, transformers' wants the matrix to weight embeddings. snake_case : List[Any] = word_embeddings # It should be done! return output_state_dict def lowercase ( ) -> Any: '''simple docstring''' snake_case : List[Any] = argparse.ArgumentParser() parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" ) parser.add_argument( """path_to_checkpoint""" , type=__A , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , ) parser.add_argument( """--config_file""" , default="""""" , type=__A , help="""An optional config json file describing the pre-trained model.""" , ) snake_case : List[Any] = parser.parse_args() # Extract the basename. snake_case : Tuple = os.path.dirname(args.path_to_checkpoint ) # Load the model. # the .zip is very optional, let's keep it for backward compatibility print(f"""Extracting PyTorch state dictionary from {args.path_to_checkpoint}""" ) if args.path_to_checkpoint.endswith(""".zip""" ): with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint: with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict: snake_case : List[str] = torch.load(__A , map_location="""cpu""" ) else: snake_case : int = torch.load(args.path_to_checkpoint , map_location="""cpu""" ) snake_case : Dict = input_state_dict.get("""args""" , __A ) # Read the config, or default to the model released by NVIDIA. if args.config_file == "": if ds_args is not None: if ds_args.bias_gelu_fusion: snake_case : int = """gelu_fast""" elif ds_args.openai_gelu: snake_case : Union[str, Any] = """gelu_new""" else: snake_case : int = """gelu""" else: # in the very early days this used to be "gelu_new" snake_case : str = """gelu_new""" # Spell out all parameters in case the defaults change. snake_case : List[Any] = GPTaConfig( vocab_size=5_0257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=__A , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="""cls_index""" , summary_use_proj=__A , summary_activation=__A , summary_proj_to_labels=__A , summary_first_dropout=0.1 , scale_attn_weights=__A , use_cache=__A , bos_token_id=5_0256 , eos_token_id=5_0256 , ) else: snake_case : int = GPTaConfig.from_json_file(args.config_file ) snake_case : int = ["""GPT2LMHeadModel"""] # Convert. print("""Converting""" ) snake_case : str = convert_megatron_checkpoint(__A , __A , __A ) # Print the structure of converted state dict. if args.print_checkpoint_structure: recursive_print(__A , __A ) # Add tokenizer class info to config # see https://github.com/huggingface/transformers/issues/13906) if ds_args is not None: snake_case : str = ds_args.tokenizer_type if tokenizer_type == "GPT2BPETokenizer": snake_case : Tuple = """gpt2""" elif tokenizer_type == "PretrainedFromHF": snake_case : List[str] = ds_args.tokenizer_name_or_path else: raise ValueError(f"""Unrecognized tokenizer_type {tokenizer_type}""" ) else: snake_case : Any = """gpt2""" snake_case : List[str] = AutoTokenizer.from_pretrained(__A ) snake_case : Optional[Any] = type(__A ).__name__ snake_case : Tuple = tokenizer_class # Store the config to file. print("""Saving config""" ) config.save_pretrained(__A ) # Save tokenizer based on args print(f"""Adding {tokenizer_class} tokenizer files""" ) tokenizer.save_pretrained(__A ) # Store the state_dict to file. snake_case : List[str] = os.path.join(__A , """pytorch_model.bin""" ) print(f"""Saving checkpoint to \"{output_checkpoint_file}\"""" ) torch.save(__A , __A ) #################################################################################################### if __name__ == "__main__": main() ####################################################################################################
36
"""simple docstring""" from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : Any ,A_ : Callable ,A_ : Optional[Features] = None ,A_ : str = None ,A_ : bool = False ,A_ : bool = False ,A_ : Optional[dict] = None ,A_ : Optional[int] = None ,**A_ : int ,) -> str: super().__init__( features=A_ ,cache_dir=A_ ,keep_in_memory=A_ ,streaming=A_ ,num_proc=A_ ,**A_ ,) A = Generator( cache_dir=A_ ,features=A_ ,generator=A_ ,gen_kwargs=A_ ,**A_ ,) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: # Build iterable dataset if self.streaming: A = self.builder.as_streaming_dataset(split='train' ) # Build regular (map-style) dataset else: A = None A = None A = None A = None self.builder.download_and_prepare( download_config=A_ ,download_mode=A_ ,verification_mode=A_ ,base_path=A_ ,num_proc=self.num_proc ,) A = self.builder.as_dataset( split='train' ,verification_mode=A_ ,in_memory=self.keep_in_memory ) return dataset
91
0
import inspect import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import is_torch_available from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin UpperCamelCase : int = 1E-4 if is_torch_available(): import torch from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder @require_torch class A__ : """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[Any]=16 , lowerCamelCase__ : Dict=13 , lowerCamelCase__ : Dict=7 , lowerCamelCase__ : List[Any]=14 , lowerCamelCase__ : Optional[int]=10 , lowerCamelCase__ : int=19 , lowerCamelCase__ : List[Any]=5 , lowerCamelCase__ : int=4 , lowerCamelCase__ : str=True , lowerCamelCase__ : Any=16 , lowerCamelCase__ : Union[str, Any]=2 , lowerCamelCase__ : int=4 , lowerCamelCase__ : Tuple=4 , lowerCamelCase__ : Dict="gelu" , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : Any=[1, 2, 3, 4, 5] , lowerCamelCase__ : List[Any]=25 , lowerCamelCase__ : List[Any]=5 , ): a__ : Optional[Any] = d_model a__ : List[str] = parent a__ : str = batch_size a__ : List[Any] = prediction_length a__ : List[Any] = context_length a__ : str = cardinality a__ : List[Any] = num_time_features a__ : Dict = lags_sequence a__ : int = embedding_dimension a__ : Dict = is_training a__ : Dict = hidden_size a__ : int = num_hidden_layers a__ : str = num_attention_heads a__ : Dict = intermediate_size a__ : List[str] = hidden_act a__ : str = hidden_dropout_prob a__ : Optional[Any] = attention_probs_dropout_prob a__ : Optional[int] = context_length a__ : Any = prediction_length + label_length a__ : List[Any] = label_length a__ : int = moving_average a__ : Any = autocorrelation_factor def _UpperCamelCase( self : Dict ): return AutoformerConfig( d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , ) def _UpperCamelCase( self : int , lowerCamelCase__ : List[Any] ): a__ : str = config.context_length + max(config.lags_sequence ) a__ : Optional[int] = ids_tensor([self.batch_size, 1] , config.cardinality[0] ) a__ : Optional[Any] = floats_tensor([self.batch_size, _past_length, config.num_time_features] ) a__ : Optional[int] = floats_tensor([self.batch_size, _past_length] ) a__ : Any = floats_tensor([self.batch_size, _past_length] ) > 0.5 # decoder inputs a__ : Optional[Any] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] ) a__ : Union[str, Any] = floats_tensor([self.batch_size, config.prediction_length] ) a__ : str = { "past_values": past_values, "static_categorical_features": static_categorical_features, "past_time_features": past_time_features, "past_observed_mask": past_observed_mask, "future_time_features": future_time_features, "future_values": future_values, } return inputs_dict def _UpperCamelCase( self : List[Any] ): a__ : Dict = self.get_config() a__ : Any = self.prepare_autoformer_inputs_dict(lowerCamelCase__ ) return config, inputs_dict def _UpperCamelCase( self : str ): a__, a__ : Tuple = self.prepare_config_and_inputs() return config, inputs_dict def _UpperCamelCase( self : Tuple , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[Any] ): a__ : Union[str, Any] = AutoformerModel(config=lowerCamelCase__ ).to(lowerCamelCase__ ).eval() a__ : List[Any] = model(**lowerCamelCase__ ) a__ : int = outputs.encoder_last_hidden_state a__ : Optional[Any] = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: a__ : str = model.get_encoder() encoder.save_pretrained(lowerCamelCase__ ) a__ : List[str] = AutoformerEncoder.from_pretrained(lowerCamelCase__ ).to(lowerCamelCase__ ) a__, a__, a__, a__, a__ : List[str] = model.create_network_inputs(**lowerCamelCase__ ) a__, a__ : Union[str, Any] = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] ) a__ : str = torch.cat( (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , ) a__ : Optional[int] = encoder(inputs_embeds=lowerCamelCase__ )[0] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 ) a__ : Tuple = ( torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 ) .unsqueeze(1 ) .repeat(1 , config.prediction_length , 1 ) ) a__ : Tuple = torch.zeros( [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , ) a__ : Dict = torch.cat( ( torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) a__ : Union[str, Any] = torch.cat( ( torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) with tempfile.TemporaryDirectory() as tmpdirname: a__ : Any = model.get_decoder() decoder.save_pretrained(lowerCamelCase__ ) a__ : Dict = AutoformerDecoder.from_pretrained(lowerCamelCase__ ).to(lowerCamelCase__ ) a__ : Optional[int] = decoder( trend=lowerCamelCase__ , inputs_embeds=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 ) @require_torch class A__ ( A__ , A__ , unittest.TestCase ): """simple docstring""" _lowercase = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else () _lowercase = (AutoformerForPrediction,) if is_torch_available() else () _lowercase = {'feature-extraction': AutoformerModel} if is_torch_available() else {} _lowercase = False _lowercase = False _lowercase = False _lowercase = False _lowercase = False _lowercase = False def _UpperCamelCase( self : Optional[Any] ): a__ : List[Any] = AutoformerModelTester(self ) a__ : List[Any] = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): self.config_tester.run_common_tests() def _UpperCamelCase( self : List[Any] ): a__, a__ : int = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: a__ : Any = model_class(lowerCamelCase__ ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCamelCase__ ) a__, a__ : List[str] = model_class.from_pretrained(lowerCamelCase__ , output_loading_info=lowerCamelCase__ ) self.assertEqual(info["missing_keys"] , [] ) def _UpperCamelCase( self : Union[str, Any] ): a__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*lowerCamelCase__ ) @unittest.skip(reason="Model has no tokens embeddings" ) def _UpperCamelCase( self : Optional[Any] ): pass def _UpperCamelCase( self : Any ): a__ : str = inspect.signature(getattr(lowerCamelCase__ , "forward" ) ) # The main input is the name of the argument after `self` a__ : int = list(model_signature.parameters.keys() )[1] self.assertEqual(AutoformerModel.main_input_name , lowerCamelCase__ ) def _UpperCamelCase( self : int ): a__, a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : List[Any] = model_class(lowerCamelCase__ ) a__ : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ : Tuple = [*signature.parameters.keys()] a__ : Any = [ "past_values", "past_time_features", "past_observed_mask", "static_categorical_features", "static_real_features", "future_values", "future_time_features", ] if model.__class__.__name__ in ["AutoformerForPrediction"]: expected_arg_names.append("future_observed_mask" ) expected_arg_names.extend( [ "decoder_attention_mask", "head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs", "past_key_values", "output_hidden_states", "output_attentions", "use_cache", "return_dict", ] ) self.assertListEqual(arg_names[: len(lowerCamelCase__ )] , lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): a__, a__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() a__ : Any = True a__ : int = getattr(self.model_tester , "seq_length" , lowerCamelCase__ ) a__ : Optional[Any] = getattr(self.model_tester , "decoder_seq_length" , lowerCamelCase__ ) a__ : Optional[int] = getattr(self.model_tester , "encoder_seq_length" , lowerCamelCase__ ) a__ : Optional[int] = getattr(self.model_tester , "d_model" , lowerCamelCase__ ) a__ : Tuple = getattr(self.model_tester , "num_attention_heads" , lowerCamelCase__ ) a__ : Union[str, Any] = d_model // num_attention_heads for model_class in self.all_model_classes: a__ : Tuple = True a__ : List[Any] = False a__ : Optional[Any] = True a__ : Optional[Any] = model_class(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() with torch.no_grad(): a__ : str = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) ) a__ : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(lowerCamelCase__ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] a__ : List[str] = True a__ : Any = model_class(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() with torch.no_grad(): a__ : str = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) ) a__ : str = outputs.encoder_attentions self.assertEqual(len(lowerCamelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) a__ : int = len(lowerCamelCase__ ) a__ : Tuple = 7 if "last_hidden_state" in outputs: correct_outlen += 1 if "trend" in outputs: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned if "loss" in outputs: correct_outlen += 1 if "params" in outputs: correct_outlen += 1 self.assertEqual(lowerCamelCase__ , lowerCamelCase__ ) # decoder attentions a__ : List[Any] = outputs.decoder_attentions self.assertIsInstance(lowerCamelCase__ , (list, tuple) ) self.assertEqual(len(lowerCamelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # cross attentions a__ : Optional[int] = outputs.cross_attentions self.assertIsInstance(lowerCamelCase__ , (list, tuple) ) self.assertEqual(len(lowerCamelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # Check attention is always last and order is fine a__ : int = True a__ : List[str] = True a__ : Union[str, Any] = model_class(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() with torch.no_grad(): a__ : Optional[Any] = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) ) self.assertEqual(out_len + 2 , len(lowerCamelCase__ ) ) a__ : str = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(lowerCamelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) @is_flaky() def _UpperCamelCase( self : Any ): super().test_retain_grad_hidden_states_attentions() def UpperCamelCase_ ( __a="train-batch.pt" ) -> Optional[Any]: a__ : Union[str, Any] = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=__a , repo_type="dataset" ) a__ : Tuple = torch.load(__a , map_location=__a ) return batch @require_torch @slow class A__ ( unittest.TestCase ): """simple docstring""" def _UpperCamelCase( self : Tuple ): a__ : Optional[Any] = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCamelCase__ ) a__ : Optional[Any] = prepare_batch() with torch.no_grad(): a__ : int = model( past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0] a__ : Union[str, Any] = torch.Size( (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) ) self.assertEqual(output.shape , lowerCamelCase__ ) a__ : List[str] = torch.tensor( [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=lowerCamelCase__ ) self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) ) def _UpperCamelCase( self : Dict ): a__ : Optional[Any] = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCamelCase__ ) a__ : int = prepare_batch("val-batch.pt" ) with torch.no_grad(): a__ : List[Any] = model( past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state a__ : Any = torch.Size((64, model.config.context_length, model.config.d_model) ) self.assertEqual(output.shape , lowerCamelCase__ ) a__ : Tuple = torch.tensor( [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=lowerCamelCase__ ) self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) ) def _UpperCamelCase( self : Dict ): a__ : Optional[Any] = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCamelCase__ ) a__ : int = prepare_batch("val-batch.pt" ) with torch.no_grad(): a__ : List[str] = model.generate( static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , ) a__ : List[str] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) ) self.assertEqual(outputs.sequences.shape , lowerCamelCase__ ) a__ : Tuple = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=lowerCamelCase__ ) a__ : Tuple = outputs.sequences.mean(dim=1 ) self.assertTrue(torch.allclose(mean_prediction[0, -3:] , lowerCamelCase__ , rtol=1E-1 ) )
37
"""simple docstring""" from maths.prime_check import is_prime def _snake_case ( snake_case__ : int ): if not isinstance(snake_case__ , snake_case__ ): A = F'Input value of [number={number}] must be an integer' raise TypeError(snake_case__ ) if is_prime(snake_case__ ) and is_prime(number + 2 ): return number + 2 else: return -1 if __name__ == "__main__": import doctest doctest.testmod()
91
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available A_ : Optional[int] = { "configuration_rag": ["RagConfig"], "retrieval_rag": ["RagRetriever"], "tokenization_rag": ["RagTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : List[Any] = [ "RagModel", "RagPreTrainedModel", "RagSequenceForGeneration", "RagTokenForGeneration", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : Tuple = [ "TFRagModel", "TFRagPreTrainedModel", "TFRagSequenceForGeneration", "TFRagTokenForGeneration", ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys A_ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
38
"""simple docstring""" import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Dict = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : List[str]=0 ) -> str: A = floats_tensor((1, 3, 128, 128) ,rng=random.Random(A_ ) ) A = np.random.RandomState(A_ ) A = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'strength': 0.75, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) A = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=A_ ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) # warmup pass to apply optimizations A = pipe(**self.get_dummy_inputs() ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: A = ort.SessionOptions() A = False return options def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) A = init_image.resize((768, 512) ) # using the PNDM scheduler by default A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' ,revision='onnx' ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=A_ ) A = 'A fantasy landscape, trending on artstation' A = np.random.RandomState(0 ) A = pipe( prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=A_ ,output_type='np' ,) A = output.images A = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) A = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) A = init_image.resize((768, 512) ) A = LMSDiscreteScheduler.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,subfolder='scheduler' ,revision='onnx' ) A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,scheduler=A_ ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=A_ ) A = 'A fantasy landscape, trending on artstation' A = np.random.RandomState(0 ) A = pipe( prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=A_ ,output_type='np' ,) A = output.images A = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) A = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
91
0
from __future__ import annotations from collections.abc import MutableSequence class snake_case_ : '''simple docstring''' def __init__( self : Union[str, Any] , _UpperCamelCase : int , _UpperCamelCase : MutableSequence[float] ) ->None: if len(_UpperCamelCase ) != degree + 1: raise ValueError( '''The number of coefficients should be equal to the degree + 1.''' ) snake_case_ = list(_UpperCamelCase ) snake_case_ = degree def __add__( self : Any , _UpperCamelCase : Polynomial ) ->Polynomial: if self.degree > polynomial_a.degree: snake_case_ = self.coefficients[:] for i in range(polynomial_a.degree + 1 ): coefficients[i] += polynomial_a.coefficients[i] return Polynomial(self.degree , _UpperCamelCase ) else: snake_case_ = polynomial_a.coefficients[:] for i in range(self.degree + 1 ): coefficients[i] += self.coefficients[i] return Polynomial(polynomial_a.degree , _UpperCamelCase ) def __sub__( self : Any , _UpperCamelCase : Polynomial ) ->Polynomial: return self + polynomial_a * Polynomial(0 , [-1] ) def __neg__( self : List[Any] ) ->Polynomial: return Polynomial(self.degree , [-c for c in self.coefficients] ) def __mul__( self : List[Any] , _UpperCamelCase : Polynomial ) ->Polynomial: snake_case_ = [0] * (self.degree + polynomial_a.degree + 1) for i in range(self.degree + 1 ): for j in range(polynomial_a.degree + 1 ): coefficients[i + j] += ( self.coefficients[i] * polynomial_a.coefficients[j] ) return Polynomial(self.degree + polynomial_a.degree , _UpperCamelCase ) def snake_case__( self : Dict , _UpperCamelCase : int | float ) ->int | float: snake_case_ = 0 for i in range(self.degree + 1 ): result += self.coefficients[i] * (substitution**i) return result def __str__( self : Tuple ) ->str: snake_case_ = '''''' for i in range(self.degree , -1 , -1 ): if self.coefficients[i] == 0: continue elif self.coefficients[i] > 0: if polynomial: polynomial += " + " else: polynomial += " - " if i == 0: polynomial += str(abs(self.coefficients[i] ) ) elif i == 1: polynomial += str(abs(self.coefficients[i] ) ) + "x" else: polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(_UpperCamelCase ) return polynomial def __repr__( self : Union[str, Any] ) ->str: return self.__str__() def snake_case__( self : Any ) ->Polynomial: snake_case_ = [0] * self.degree for i in range(self.degree ): snake_case_ = self.coefficients[i + 1] * (i + 1) return Polynomial(self.degree - 1 , _UpperCamelCase ) def snake_case__( self : List[Any] , _UpperCamelCase : int | float = 0 ) ->Polynomial: snake_case_ = [0] * (self.degree + 2) snake_case_ = constant for i in range(self.degree + 1 ): snake_case_ = self.coefficients[i] / (i + 1) return Polynomial(self.degree + 1 , _UpperCamelCase ) def __eq__( self : str , _UpperCamelCase : object ) ->bool: if not isinstance(_UpperCamelCase , _UpperCamelCase ): return False if self.degree != polynomial_a.degree: return False for i in range(self.degree + 1 ): if self.coefficients[i] != polynomial_a.coefficients[i]: return False return True def __ne__( self : str , _UpperCamelCase : object ) ->bool: return not self.__eq__(_UpperCamelCase )
39
"""simple docstring""" from __future__ import annotations def _snake_case ( snake_case__ : tuple[int, int] , snake_case__ : int ): A , A = position A = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] A = [] for position in positions: A , A = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(snake_case__ ) return permissible_positions def _snake_case ( snake_case__ : list[list[int]] ): return not any(elem == 0 for row in board for elem in row ) def _snake_case ( snake_case__ : list[list[int]] , snake_case__ : tuple[int, int] , snake_case__ : int ): if is_complete(snake_case__ ): return True for position in get_valid_pos(snake_case__ , len(snake_case__ ) ): A , A = position if board[y][x] == 0: A = curr + 1 if open_knight_tour_helper(snake_case__ , snake_case__ , curr + 1 ): return True A = 0 return False def _snake_case ( snake_case__ : int ): A = [[0 for i in range(snake_case__ )] for j in range(snake_case__ )] for i in range(snake_case__ ): for j in range(snake_case__ ): A = 1 if open_knight_tour_helper(snake_case__ , (i, j) , 1 ): return board A = 0 A = F'Open Kight Tour cannot be performed on a board of size {n}' raise ValueError(snake_case__ ) if __name__ == "__main__": import doctest doctest.testmod()
91
0