code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_barthez import BarthezTokenizer
else:
    # Without sentencepiece the slow tokenizer cannot be built; expose None so
    # `slow_tokenizer_class` lookups degrade gracefully.
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"


class SCREAMING_SNAKE_CASE_(PreTrainedTokenizerFast):
    """Fast BARThez tokenizer, backed by HuggingFace *tokenizers*.

    Wraps a BPE-sentencepiece model; falls back to :class:`BarthezTokenizer`
    as the slow counterpart when a vocab file is available.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        """Build the fast tokenizer from a sentencepiece model and/or tokenizer.json."""
        # The mask token behaves like a normal word: include the preceding space.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # The slow tokenizer can only be recreated if the sentencepiece model exists.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add BARThez special tokens: `<s> A </s>` or `<s> A </s></s> B </s>`."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return an all-zero token-type mask (BARThez does not use token types)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece model into `save_directory`; returns the written path."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
343
'''simple docstring''' from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, 
wavaveca_conformer, wavaveca_phoneme, wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
67
0
from typing import List, Optional, Tuple, Union

import torch

from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class __magic_name__(DiffusionPipeline):
    """Unconditional image-generation pipeline using DDIM denoising."""

    def __init__(self, unet, scheduler):
        """Register the UNet and a scheduler (coerced to DDIM)."""
        super().__init__()
        # Make sure scheduler can always be converted to DDIM.
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator=None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Run the full denoising loop and return generated images.

        Raises:
            ValueError: if a list of generators is given whose length differs
                from ``batch_size``.
        """
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f'You have passed a list of generators of length {len(generator)}, but requested an effective batch'
                f' size of {batch_size}. Make sure the batch size matches the length of the generators.'
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # Set step values.
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. Predict noise model_output.
            model_output = self.unet(image, t).sample

            # 2. Predict previous mean of image x_t-1 and add variance depending on eta;
            #    eta corresponds to η in the DDIM paper and should be in [0, 1].
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        # Map from [-1, 1] to [0, 1] and move channels last for numpy/PIL.
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
218
"""Matrix inverse for 2x2 and 3x3 matrices, using Decimal for exact arithmetic."""

from decimal import Decimal

from numpy import array


def __lowerCAmelCase(matrix: "list[list[float]]") -> "list[list[float]]":
    """Return the inverse of a 2x2 or 3x3 matrix.

    Raises:
        ValueError: if the matrix is singular or not 2x2 / 3x3.
    """
    # Short alias; Decimal keeps the intermediate products exact.
    d = Decimal

    # 2x2 case: adjugate / determinant.
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        determinant = float(d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]))
        if determinant == 0:
            raise ValueError('This matrix has no inverse.')

        # Adjugate: swap the diagonal, negate the off-diagonal.
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # `or 0.0` normalizes -0.0 to 0.0 in the output.
        return [[(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Determinant via the rule of Sarrus.
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError('This matrix has no inverse.')

        # Cofactor matrix (signed 2x2 minors).
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][1]))
        cofactor_matrix[0][1] = -((d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0])))
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (d(matrix[1][1]) * d(matrix[2][0]))
        cofactor_matrix[1][0] = -((d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1])))
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][0]))
        cofactor_matrix[1][2] = -((d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0])))
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][1]))
        cofactor_matrix[2][1] = -((d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0])))
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (d(matrix[0][1]) * d(matrix[1][0]))

        # Transpose the cofactor matrix (adjugate).
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse = (1 / determinant) * adjugate.
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError('Please provide a matrix of size 2x2 or 3x3.')


# Public, importable alias (the mangled name above is kept for backward
# compatibility with existing callers).
inverse_of_matrix = __lowerCAmelCase
67
0
"""Accelerate example: GLUE/MRPC fine-tuning with experiment tracking."""

import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType

########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC in any of the
# following settings (with the same script): single CPU/GPU, multi-GPU,
# (multi) TPU, fp16/fp32.  New additions from the base script are marked
# with the  # New Code #  tags.
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator, batch_size=16):
    """Build MRPC train/validation dataloaders tokenized with bert-base-cased."""
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the tokenizer to every split, main process first so the cache is shared.
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=['idx', 'sentence1', 'sentence2'],
        )

    # The models expect the target column to be named "labels".
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16.
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding='longest',
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors='pt',
        )

    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader


# For testing only: swap in mocked dataloaders when requested by the test harness.
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    """Train and evaluate the model, optionally logging to all available trackers."""
    # For testing only: shrink the run.
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config["num_epochs"] = 2
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment.
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with='all', project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs.
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load('glue', 'mrpc')

    # If the batch size is too big we use gradient accumulation.
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model here so the seed also controls new weight initialization.
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)
    # Must happen before optimizer creation for TPU correctness.
    model = model.to(accelerator.device)

    optimizer = AdamW(params=model.parameters(), lr=lr)
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything; order of unpacking must match the order passed in.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # Initialize the trackers; overall configuration is stored alongside.
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split('.')[0]
        accelerator.init_trackers(run, config)

    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we log the total loss of each epoch.
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}:""", eval_metric)
        # New Code #
        # Values passed to `Accelerator.log` can be `str`, `int`, `float` or dicts thereof.
        if args.with_tracking:
            accelerator.log(
                {
                    'accuracy': eval_metric['accuracy'],
                    'f1': eval_metric['f1'],
                    'train_loss': total_loss.item() / len(train_dataloader),
                    'epoch': epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, call `accelerator.end_training()` to close all open trackers.
    if args.with_tracking:
        accelerator.end_training()


def main():
    """Parse CLI arguments and launch training."""
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision',
        type=str,
        default=None,
        choices=['no', 'fp16', 'bf16', 'fp8'],
        help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.',
    )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    parser.add_argument(
        '--with_tracking',
        action='store_true',
        help='Whether to load in all available experiment trackers from the environment and use them for logging.',
    )
    parser.add_argument(
        '--project_dir',
        type=str,
        default='logs',
        help='Location on where to store experiment tracking logs` and relevent project information',
    )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
28
"""Shared (deprecated) argument dataclass for the HF benchmarking utilities."""

import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List

from ..utils import logging


logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    """`dataclasses.field` helper for list-valued defaults (mutable defaults need a factory)."""
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class a__:
    """Base benchmark arguments; framework-specific subclasses add device flags (e.g. `is_tpu`)."""

    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )

    def __post_init__(self):
        """Warn that the whole benchmarking-utils machinery is deprecated."""
        warnings.warn(
            f"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
            ''' are deprecated in general and it is advised to use external Benchmarking libraries '''
            ''' to benchmark Transformer models.''',
            FutureWarning,
        )

    def to_json_string(self):
        """Serialize this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        """Validated list of model identifiers; raises if empty."""
        if len(self.models) <= 0:
            raise ValueError(
                '''Please make sure you provide at least one model name / model identifier, *e.g.* `--models'''
                ''' bert-base-cased` or `args.models = [\'bert-base-cased\'].'''
            )
        return self.models

    @property
    def do_multi_processing(self):
        """Multiprocessing is used only when enabled and not on TPU."""
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info('''Multiprocessing is currently not possible on TPU.''')
            return False
        else:
            return True
67
0
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spm_char.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',
        'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',
        'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'microsoft/speecht5_asr': 1024,
    'microsoft/speecht5_tts': 1024,
    'microsoft/speecht5_vc': 1024,
}


class _lowercase(PreTrainedTokenizer):
    """SpeechT5 character-level tokenizer backed by a sentencepiece model."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        """Load the sentencepiece model from `vocab_file`."""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        """Map every known token string to its id, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePieceProcessor is not picklable; drop it and reload in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # For backward compatibility with pickles created before sp_model_kwargs existed.
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string into sentencepiece pieces."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Convert an id to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Join tokens back into a string; special tokens bypass sentencepiece decoding."""
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # Make sure that special tokens are not decoded using the sentencepiece model.
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Append EOS; pairs are simply concatenated (kept for API consistency)."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency.
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for the appended EOS and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or re-serialize) the sentencepiece model into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # The original file is gone (e.g. loaded from a serialized proto): re-serialize.
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
175
"""Fast BARThez tokenizer module (second record of the same upstream file)."""

import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_barthez import BarthezTokenizer
else:
    # No sentencepiece -> no slow tokenizer available.
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"


class a__(PreTrainedTokenizerFast):
    """Fast BARThez tokenizer backed by HuggingFace *tokenizers*."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        """Initialize from a sentencepiece model and/or a serialized tokenizer.json."""
        # Mask token eats the preceding space so it behaves like a normal word.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # Saving the slow tokenizer requires the original sentencepiece model.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """`<s> A </s>` for a single sequence, `<s> A </s></s> B </s>` for a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """All-zero token-type ids — BARThez does not use segment embeddings."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the sentencepiece model to `save_directory` and return its path."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.'''
            )
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
67
0
"""Convert a T5X SwitchTransformers checkpoint into sharded PyTorch weight files.

Restored from name-mangled source. Fixes:
- all four functions were named ``lowerCAmelCase_`` (each shadowing the last),
  while the call sites referenced ``rename_base_flax_keys`` /
  ``get_key_and_tensorstore_dict`` / ``rename_and_save_block`` /
  ``shard_on_the_fly`` — those names are restored;
- ``def f(__a, __a, ...)`` duplicate parameter names (a SyntaxError) replaced
  with real parameter names;
- ``args.switch_tax_checkpoint_path`` corrected to the argparse dest
  ``switch_t5x_checkpoint_path`` derived from ``--switch_t5x_checkpoint_path``.
"""
import argparse
import json
import os

import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile

from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
    rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int


def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Rename one flax key/tensor pair to the PyTorch naming/layout scheme."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor


def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    """Split a flattened checkpoint key into (real layer name, sub-key tuple, content)."""
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content


def rename_and_save_block(current_block, save_path):
    """Apply the HF key renaming to a shard and save it with ``torch.save``."""
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        # flattened keys use "/" separators; PyTorch state dicts use "."
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)


def shard_on_the_fly(switch_checkpoint_path, save_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    """Stream weights out of a tensorstore checkpoint into size-bounded shards.

    Returns ``(metadata, index)`` — for a single shard, ``({weights_name: keys}, None)``.
    """
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(save_path, exist_ok=True)

    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path_shard = os.path.join(
                save_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path_shard)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path_shard = os.path.join(
        save_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
    )
    rename_and_save_block(current_block, save_path_shard)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin"
        )  # len(sharded_state_dicts):05d}
        temp_filename = os.path.join(save_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(save_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(save_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
    parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )


def sanity_check():
    """Smoke-test a converted checkpoint by generating from a small Switch model."""
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )
    tokenizer = TaTokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
10
"""Sum of Euler's totient function phi(n) for 2 <= n <= limit.

Fix: the function was (mangled to) ``__lowerCAmelCase`` while the
``__main__`` guard called ``solution()`` — a guaranteed NameError. The
function is restored to the name the script actually invokes.
"""


def solution(limit: int = 1_000_000) -> int:
    """Return sum(phi(n) for n in 2..limit) via a sieve.

    This equals the number of reduced proper fractions with denominator
    <= limit (Farey sequence length minus the endpoints).

    Args:
        limit: upper bound (inclusive) for n; must be >= 2.

    Returns:
        The integer sum of totients from 2 to ``limit``.
    """
    # Sieve of Eratosthenes over the odd numbers, plus 2.
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    # phi(n) = n * prod(1 - 1/p) over prime divisors p of n.
    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
67
0
"""MaskFormer model configuration.

Restored from name-mangled source: the class must inherit from the imported
``PretrainedConfig`` (the mangled base ``UpperCAmelCase__`` was undefined), and
``isinstance(x, dict)`` checks were reduced to placeholder arguments.
"""
import copy
from typing import Dict, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig


MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)


class MaskFormerConfig(PretrainedConfig):
    """Configuration for a MaskFormer model: a backbone (Swin/ResNet) plus a
    transformer decoder (DETR) and the Hungarian-matcher loss weights.
    """

    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with"
                f" MaskFormer. Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        """Build a MaskFormer config from already-instantiated backbone/decoder configs."""
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


# Backward-compatible alias for the previous (generated) class name.
A_ = MaskFormerConfig
322
"""Tests for the OpenAI GPT model family.

Restored from name-mangled source: the tester and test classes were both
named ``a__`` (the second shadowing the first), the test-class bases
``UpperCAmelCase__`` were undefined (they are the imported mixins), and every
method was named ``SCREAMING_SNAKE_CASE__`` so methods shadowed each other and
unittest discovered nothing. Real names are restored throughout.
"""
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
        OpenAIGPTConfig,
        OpenAIGPTDoubleHeadsModel,
        OpenAIGPTForSequenceClassification,
        OpenAIGPTLMHeadModel,
        OpenAIGPTModel,
    )


class OpenAIGPTModelTester:
    """Builds small random configs/inputs and runs per-architecture checks."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict


@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
0
"""Tests for the ChineseCLIP image processor.

Restored from name-mangled source: the tester and both test classes were all
named ``lowerCamelCase_`` (each shadowing the previous), every method was named
``lowercase_`` (so later defs shadowed earlier ones and unittest discovered
almost nothing), ``ChineseCLIPImageProcessingTester`` was referenced but never
defined under that name, and ``_A`` placeholders stood where ``True`` booleans
belong (``do_center_crop``, ``equal_resolution``, ``numpify``, ``torchify``).
"""
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor


class ChineseCLIPImageProcessingTester(unittest.TestCase):
    """Holds the processor kwargs under test and builds random image inputs."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Create random PIL / numpy / torch images (mutually exclusive flags)."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs


@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )


@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        # RGBA inputs are converted to RGB, so 3 channels are expected out.
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
181
"""Processor for LayoutXLM: combines a LayoutLMv2 image processor and a LayoutXLM tokenizer."""
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class a__(ProcessorMixin):
    """
    Constructs a LayoutXLM processor which combines a LayoutLMv2 image processor and a LayoutXLM
    tokenizer into a single processor.

    It first uses the image processor to resize document images to a fixed size, and optionally
    applies OCR to get words and normalized bounding boxes. These are then provided to the
    tokenizer, which turns the words and bounding boxes into token-level ``input_ids``,
    ``attention_mask``, ``token_type_ids`` and ``bbox``.

    NOTE(review): the original block was machine-garbled (every parameter was named ``a`` —
    a SyntaxError — and assignment targets were lost). Names below are reconstructed from the
    surviving read sites and the ProcessorMixin contract.
    """

    # ProcessorMixin contract: sub-objects this processor wraps and their class names.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """
        Args:
            image_processor: instance of ``LayoutLMv2ImageProcessor`` (required).
            tokenizer: instance of ``LayoutXLMTokenizer`` or ``LayoutXLMTokenizerFast`` (required).
            kwargs: may contain the deprecated ``feature_extractor`` alias for ``image_processor``.
        """
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # Back-compat: fall back to the deprecated alias if the new argument was not given.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """
        Run the image processor on ``images`` and the tokenizer on the (possibly OCR-derived)
        words/boxes, returning a single ``BatchEncoding`` that also carries the pixel values
        under the ``"image"`` key.
        """
        # verify input: user-supplied boxes/word_labels are incompatible with internal OCR
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        """Duplicate each sample's image so that every overflowed encoding has its image."""
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Inputs LayoutXLM models actually consume.
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        """Deprecated alias for ``image_processor_class``."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """Deprecated alias for ``image_processor``."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
67
0
'''simple docstring''' class __UpperCamelCase : def __init__( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =set_counts lowerCamelCase_ =max(lowerCAmelCase ) lowerCamelCase_ =len(lowerCAmelCase ) lowerCamelCase_ =[1] * num_sets lowerCamelCase_ =list(range(lowerCAmelCase ) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.get_parent(lowerCAmelCase ) lowerCamelCase_ =self.get_parent(lowerCAmelCase ) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] lowerCamelCase_ =0 lowerCamelCase_ =dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 lowerCamelCase_ =self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] lowerCamelCase_ =0 lowerCamelCase_ =src_parent lowerCamelCase_ =self.set_counts[src_parent] lowerCamelCase_ =max(self.max_set, lowerCAmelCase ) return True def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" if self.parents[disj_set] == disj_set: return disj_set lowerCamelCase_ =self.get_parent(self.parents[disj_set] ) return self.parents[disj_set]
75
"""Wrapper model that runs several ControlNets and sums their residuals."""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from torch import nn

from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging


logger = logging.get_logger(__name__)


class a__(ModelMixin):
    r"""
    Multiple `ControlNetModel` wrapper for Multi-ControlNet.

    Runs each wrapped controlnet on its own conditioning image/scale and sums the
    per-block residuals before they are fed to the UNet.

    NOTE(review): the original block was machine-garbled (duplicate ``a`` parameter
    names — a SyntaxError — and three methods sharing one name). Names below are
    reconstructed from the surviving read sites and the diffusers
    ``MultiControlNetModel`` API.
    """

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        # ModuleList so the sub-models are registered (parameters, device moves, ...)
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        """Run every controlnet on its (image, scale) pair and element-wise sum the results."""
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        """
        Save each wrapped controlnet to ``save_directory``, ``save_directory_1``,
        ``save_directory_2``, ... so that :meth:`from_pretrained` can reload them.
        """
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        """Instantiate from a directory layout produced by :meth:`save_pretrained`."""
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
67
0
def A ( _lowercase = 50 ): SCREAMING_SNAKE_CASE : List[str] = [1] * (length + 1) for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): ways_number[row_length] += ways_number[ row_length - tile_start - tile_length ] return ways_number[length] if __name__ == "__main__": print(f"""{solution() = }""")
182
'''simple docstring''' from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING __UpperCAmelCase =logging.get_logger(__name__) @add_end_docstrings(UpperCAmelCase__ ) class a__ ( UpperCAmelCase__ ): def __init__( self : List[str] , *a : Union[str, Any] , **a : Optional[Any] ): """simple docstring""" super().__init__(*a , **a ) requires_backends(self , '''vision''' ) self.check_model_type(a ) def __call__( self : Any , a : Union[str, List[str], "Image.Image", List["Image.Image"]] , **a : Optional[int] ): """simple docstring""" return super().__call__(a , **a ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , **a : Any ): """simple docstring""" return {}, {}, {} def SCREAMING_SNAKE_CASE__ ( self : List[Any] , a : List[str] ): """simple docstring""" __lowerCamelCase = load_image(a ) __lowerCamelCase = image.size __lowerCamelCase = self.image_processor(images=a , return_tensors=self.framework ) return model_inputs def SCREAMING_SNAKE_CASE__ ( self : int , a : Optional[Any] ): """simple docstring""" __lowerCamelCase = self.model(**a ) return model_outputs def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , a : Any ): """simple docstring""" __lowerCamelCase = model_outputs.predicted_depth __lowerCamelCase = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=a ) __lowerCamelCase = prediction.squeeze().cpu().numpy() __lowerCamelCase = (output * 2_55 / np.max(a )).astype('''uint8''' ) __lowerCamelCase = Image.fromarray(a ) __lowerCamelCase = {} __lowerCamelCase = predicted_depth __lowerCamelCase = depth return output_dict
67
0
"""Convert Table Transformer checkpoints (PubTables-1M) to the HuggingFace format.

NOTE(review): the original block was machine-garbled — every helper was defined as
``def lowercase`` (each shadowing the previous) while the body calls ``rename_key``,
``rename_backbone_keys``, ``read_in_q_k_v``, ``resize`` and ``normalize`` by name,
and module-level assignments (``logger``, ``rename_keys``, ``parser``, ``args``)
went to throwaway names. Names are reconstructed from those call/read sites.
"""
import argparse
from collections import OrderedDict
from pathlib import Path

import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F

from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))

# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
        ("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
    ]
)


def rename_key(state_dict, old, new):
    """Move ``state_dict[old]`` to ``state_dict[new]`` in place."""
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    """Return a copy of ``state_dict`` with backbone keys renamed to the HF convention."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("""backbone.0.body""", """backbone.conv_encoder.model""")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict


def read_in_q_k_v(state_dict):
    """Split each fused attention ``in_proj`` weight/bias into separate q/k/v projections (in place).

    256 is the hidden size of these checkpoints; the fused matrix stacks q, k, v in that order.
    """
    prefix = """"""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]


def resize(image, checkpoint_url):
    """Resize ``image`` so its longer side is 800 (detection) or 1000 (structure recognition)."""
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if """detection""" in checkpoint_url else 1_000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))

    return resized_image


def normalize(image):
    """Convert a PIL image to a normalized tensor (ImageNet mean/std)."""
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image


@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Copy/paste/tweak the original checkpoint into the HF format, verify it on an
    example image, and optionally save to disk / push to the hub."""
    logger.info("""Converting model...""")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="""cpu""")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = """model."""
    for key in state_dict.copy().keys():
        if not key.startswith("""class_labels_classifier""") and not key.startswith("""bbox_predictor"""):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="""resnet18""",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        idalabel = {0: """table""", 1: """table rotated"""}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        idalabel = {
            0: """table""",
            1: """table column""",
            2: """table row""",
            3: """table column header""",
            4: """table projected row header""",
            5: """table spanning cell""",
        }
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}

    image_processor = DetrImageProcessor(
        format="""coco_detection""", max_size=800 if """detection""" in checkpoint_url else 1_000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    file_name = """example_pdf.png""" if """detection""" in checkpoint_url else """example_table.png"""
    file_path = hf_hub_download(repo_id="""nielsr/example-pdf""", repo_type="""dataset""", filename=file_name)
    image = Image.open(file_path).convert("""RGB""")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7_897, -16.9_985, 6.7_937], [-8.0_186, -22.2_192, 6.9_677], [-7.3_117, -21.0_708, 7.4_055]]
        )
        expected_boxes = torch.tensor(
            [[0.4_867, 0.1_767, 0.6_732], [0.6_718, 0.4_479, 0.3_830], [0.4_716, 0.1_760, 0.6_364]]
        )
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1_430, -8.3_214, 4.8_274], [-18.4_685, -7.1_361, -4.2_667], [-26.3_693, -9.3_429, -4.9_962]]
        )
        expected_boxes = torch.tensor(
            [[0.4_983, 0.5_595, 0.9_440], [0.4_916, 0.6_315, 0.5_954], [0.6_108, 0.8_637, 0.1_135]]
        )

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("""Looks ok!""")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("""Pushing model to the hub...""")
        model_name = (
            """microsoft/table-transformer-detection"""
            if """detection""" in checkpoint_url
            else """microsoft/table-transformer-structure-recognition"""
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--checkpoint_url',
        default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
        type=str,
        choices=[
            'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
            'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
        ],
        help='URL of the Table Transformer checkpoint you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )
    args = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
155
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCAmelCase ={ "configuration_clap": [ "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST", "ClapAudioConfig", "ClapConfig", "ClapTextConfig", ], "processing_clap": ["ClapProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase =[ "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST", "ClapModel", "ClapPreTrainedModel", "ClapTextModel", "ClapTextModelWithProjection", "ClapAudioModel", "ClapAudioModelWithProjection", ] __UpperCAmelCase =["ClapFeatureExtractor"] if TYPE_CHECKING: from .configuration_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioConfig, ClapConfig, ClapTextConfig, ) from .processing_clap import ClapProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clap import ClapFeatureExtractor from .modeling_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioModel, ClapAudioModelWithProjection, ClapModel, ClapPreTrainedModel, ClapTextModel, ClapTextModelWithProjection, ) else: import sys __UpperCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
67
0
import unittest

from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers.models.esm.modeling_esmfold import EsmForProteinFolding


class EsmFoldModelTester:
    """Builds tiny ESMFold configs and inputs for the common model tests.

    NOTE(review): the original block was machine-garbled — every parameter was
    named ``__lowercase`` (a SyntaxError), all three classes were named ``a``,
    and every method was named ``UpperCAmelCase``. Class/method names are
    restored from the call sites (``EsmFoldModelTester(self)``,
    ``prepare_config_and_inputs``, ``get_config``, ``create_and_check_model``)
    and, where no call site exists, from the upstream transformers test file.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        # vocab_size is pinned to 33 (the ESM alphabet), independent of self.vocab_size
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        # 8 structure-module recycling iterations; 14 atoms per residue, 7 torsion angles
        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))

    def prepare_config_and_inputs_for_common(self):
        # name follows the ModelTesterMixin contract
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    # Attribute names follow the ModelTesterMixin/PipelineTesterMixin contract
    # (restored from the upstream file; the garbled original named them all `a`).
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    # The mixin tests below don't apply to ESMFold; skip-method names restored
    # from the upstream test file so they actually override the mixin's tests.
    @unittest.skip("Does not support attention outputs")
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("ESMFold does not support passing input embeds!")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_integration(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_headmasking(self):
        pass

    @unittest.skip("ESMFold does not output hidden states in the normal way.")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("ESMfold does not output hidden states in the normal way.")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip("ESMFold only has one output format.")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip("ESMFold does not support input chunking.")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
    def test_initialization(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_hidden_state(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_simple(self):
        pass

    @unittest.skip("ESMFold doesn't support data parallel.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5_828, 0.7_993, -10.9_334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
114
'''simple docstring'''
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow


@require_torch
class a__(unittest.TestCase):
    """Tests for BarkProcessor: save/load round-trips, voice presets, tokenizer parity.

    NOTE(review): the anonymized original collapsed every method onto one name
    (later defs shadowed earlier ones) and dropped the `self.` prefix on the
    fixture assignments, so no attribute was ever set. Method and attribute
    names below are reconstructed from the internal call sites
    (`self.get_tokenizer`, `self.checkpoint`, `self.tmpdirname`, ...).
    """

    def setUp(self):
        # Shared fixtures used by every test; tearDown removes the temp dir.
        self.checkpoint = 'ylacombe/bark-small'
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = 'en_speaker_1'
        self.input_string = 'This is a test string'
        self.speaker_embeddings_dict_path = 'speaker_embeddings_path.json'
        self.speaker_embeddings_directory = 'speaker_embeddings'

    def get_tokenizer(self, **kwargs):
        """Load the checkpoint tokenizer, forwarding any extra kwargs."""
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        # A processor saved then re-loaded must expose an identical vocab.
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token='(BOS)',
            eos_token='(EOS)',
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8

        voice_preset = {
            'semantic_prompt': np.ones(seq_len),
            'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len)),
            'fine_prompt': np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs['history_prompt']
        for key in voice_preset:
            self.assertListEqual(
                voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist()
            )

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, 'file.npz')
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs['history_prompt']
        for key in voice_preset:
            self.assertListEqual(
                voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist()
            )

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)

        # NOTE(review): the boolean kwargs were anonymized away; values below
        # follow the upstream Bark processor test — confirm against it.
        encoded_tok = tokenizer(
            self.input_string,
            padding='max_length',
            max_length=2_56,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
67
0
from __future__ import annotations

from collections.abc import Iterator
from typing import Any


class Node:
    """A single node of a circular singly linked list."""

    def __init__(self, data: Any):
        # The anonymized original assigned to a throwaway local instead of
        # `self`, so nodes carried no state; restored here.
        self.data = data
        self.next: Node | None = None


class CircularLinkedList:
    """Circular singly linked list with head/tail pointers."""

    def __init__(self) -> None:
        self.head: Node | None = None
        self.tail: Node | None = None

    def __iter__(self) -> Iterator[Any]:
        # Walk the ring once, stopping when we wrap back to head.
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        """Append `data` at the end of the ring."""
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        """Prepend `data` at the front of the ring."""
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        """Insert `data` before position `index` (0..len inclusive).

        Raises:
            IndexError: if `index` is outside [0, len].
        """
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        """Remove and return the first element."""
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        """Remove and return the last element."""
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        """Remove and return the element at `index`.

        Raises:
            IndexError: if `index` is outside [0, len).
        """
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            # Tail must skip the old head to keep the ring closed.
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def lowercase() -> None:
    '''simple docstring'''
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.is_empty() is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
343
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCAmelCase ={"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase =[ "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST", "ViTMSNModel", "ViTMSNForImageClassification", "ViTMSNPreTrainedModel", ] if TYPE_CHECKING: from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_msn import ( VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMSNForImageClassification, ViTMSNModel, ViTMSNPreTrainedModel, ) else: import sys __UpperCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
67
0
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List

from ..utils import logging


logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    """Dataclass field whose default is a fresh copy of `default` (lists are mutable).

    Restored name: the class body below calls `list_field(...)`, but the
    anonymized original defined this helper under a placeholder name with two
    identically-named parameters (a SyntaxError).
    """
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class __magic_name__:
    """Base benchmark arguments (deprecated Hugging Face benchmarking utils).

    NOTE(review): every field name was collapsed onto one placeholder in the
    anonymized original, so only the last field survived. Names below are
    reconstructed from the `--no-inference` / `--no-cuda` style help strings
    and the `self.models` / `self.multi_process` reads in the properties.
    """

    models: List[str] = list_field(
        default=[],
        metadata={
            'help': (
                'Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'
                ' of all available models'
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={'help': 'List of batch sizes for which memory and time performance will be evaluated'}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 3_2, 1_2_8, 5_1_2],
        metadata={'help': 'List of sequence lengths for which memory and time performance will be evaluated'},
    )
    inference: bool = field(
        default=True,
        metadata={'help': 'Whether to benchmark inference of model. Inference can be disabled via --no-inference.'},
    )
    cuda: bool = field(
        default=True,
        metadata={'help': 'Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'},
    )
    tpu: bool = field(
        default=True, metadata={'help': 'Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'}
    )
    fp16: bool = field(default=False, metadata={'help': 'Use FP16 to accelerate inference.'})
    training: bool = field(default=False, metadata={'help': 'Benchmark training of model'})
    verbose: bool = field(default=False, metadata={'help': 'Verbose memory tracing'})
    speed: bool = field(
        default=True,
        metadata={'help': 'Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'},
    )
    memory: bool = field(
        default=True,
        metadata={
            'help': 'Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={'help': 'Trace memory line by line'})
    save_to_csv: bool = field(default=False, metadata={'help': 'Save result to a CSV file'})
    log_print: bool = field(default=False, metadata={'help': 'Save all print statements in a log file'})
    env_print: bool = field(default=False, metadata={'help': 'Whether to print environment information'})
    multi_process: bool = field(
        default=True,
        metadata={
            'help': (
                'Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'
                ' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'
                ' for debugging / testing and on TPU.'
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f'''inference_time_{round(time() )}.csv''',
        metadata={'help': 'CSV filename used if saving time results to csv.'},
    )
    inference_memory_csv_file: str = field(
        default=f'''inference_memory_{round(time() )}.csv''',
        metadata={'help': 'CSV filename used if saving memory results to csv.'},
    )
    train_time_csv_file: str = field(
        default=f'''train_time_{round(time() )}.csv''',
        metadata={'help': 'CSV filename used if saving time results to csv for training.'},
    )
    train_memory_csv_file: str = field(
        default=f'''train_memory_{round(time() )}.csv''',
        metadata={'help': 'CSV filename used if saving memory results to csv for training.'},
    )
    env_info_csv_file: str = field(
        default=f'''env_info_{round(time() )}.csv''',
        metadata={'help': 'CSV filename used if saving environment information.'},
    )
    log_filename: str = field(
        default=f'''log_{round(time() )}.csv''',
        metadata={'help': 'Log filename used if print statements are saved in log.'},
    )
    repeat: int = field(default=3, metadata={'help': 'Times an experiment will be run.'})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            'help': (
                'Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'
                ' model weights.'
            )
        },
    )

    def __post_init__(self):
        # Deprecation notice emitted as soon as an instance is created.
        warnings.warn(
            f'The class {self.__class__} is deprecated. Hugging Face Benchmarking utils'
            ' are deprecated in general and it is advised to use external Benchmarking libraries '
            ' to benchmark Transformer models.',
            FutureWarning,
        )

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        """Return `self.models`, raising if the user supplied none."""
        if len(self.models) <= 0:
            raise ValueError(
                'Please make sure you provide at least one model name / model identifier, *e.g.* `--models'
                ' bert-base-cased` or `args.models = [\'bert-base-cased\'].'
            )
        return self.models

    @property
    def do_multi_processing(self):
        """Use multiprocessing only when enabled and not running on TPU."""
        if not self.multi_process:
            return False
        # `is_tpu` is provided by the framework-specific subclasses.
        elif self.is_tpu:
            logger.info('Multiprocessing is currently not possible on TPU.')
            return False
        else:
            return True
218
'''simple docstring''' import re from filelock import FileLock try: import nltk __UpperCAmelCase =True except (ImportError, ModuleNotFoundError): __UpperCAmelCase =False if NLTK_AVAILABLE: with FileLock(".lock") as lock: nltk.download("punkt", quiet=True) def __lowerCAmelCase ( UpperCamelCase__ ) -> str: re.sub('''<n>''' , '''''' , UpperCamelCase__ ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(UpperCamelCase__ ) )
67
0
'''simple docstring'''
import argparse
import re
from typing import Dict

import torch
from datasets import Audio, Dataset, load_dataset, load_metric

from transformers import AutoFeatureExtractor, pipeline


def log_results(result: Dataset, args: Dict[str, str]):
    """Compute WER/CER over `result` and write them (plus optional per-example logs) to disk.

    Function names here are restored from the call sites (`log_results(...)`,
    `normalize_text(...)`, `main(args)`) that the anonymized original kept
    while collapsing all three defs onto one placeholder name.
    """
    log_outputs = args.log_outputs
    dataset_id = '_'.join(args.dataset.split('/') + [args.config, args.split])

    # load metric
    wer = load_metric('wer')
    cer = load_metric('cer')

    # compute metrics
    wer_result = wer.compute(references=result['target'], predictions=result['prediction'])
    cer_result = cer.compute(references=result['target'], predictions=result['prediction'])

    # print & log results
    result_str = F"""WER: {wer_result}\nCER: {cer_result}"""
    print(result_str)

    with open(F"""{dataset_id}_eval_results.txt""", 'w') as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = F"""log_{dataset_id}_predictions.txt"""
        target_file = F"""log_{dataset_id}_targets.txt"""

        with open(pred_file, 'w') as p, open(target_file, 'w') as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(F"""{i}""" + '\n')
                p.write(batch['prediction'] + '\n')
                t.write(F"""{i}""" + '\n')
                t.write(batch['target'] + '\n')

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    """Lower-case `text` and strip punctuation/whitespace unseen during training."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, '', text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    # NOTE(review): the multi-space entries were flattened by the anonymizer;
    # values restored from upstream — confirm against the training preprocessing.
    token_sequences_to_ignore = ['\n\n', '\n', '   ', '  ']

    for t in token_sequences_to_ignore:
        text = ' '.join(text.split(t))

    return text


def main(args):
    """Run the ASR pipeline over the requested dataset split and log WER/CER."""
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column('audio', Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('automatic-speech-recognition', model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch['audio']['array'], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch['prediction'] = prediction['text']
        batch['target'] = normalize_text(batch['sentence'])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    args = parser.parse_args()

    main(args)
28
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase =logging.get_logger(__name__) __UpperCAmelCase ={ "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json", } class a__ ( UpperCAmelCase__ ): lowerCamelCase : Optional[int] ="gpt_neox_japanese" def __init__( self : List[Any] , a : Tuple=3_20_00 , a : Dict=25_60 , a : Union[str, Any]=32 , a : Dict=32 , a : Dict=4 , a : Optional[Any]="gelu" , a : Any=1.00 , a : str=1_00_00 , a : List[str]=20_48 , a : str=0.02 , a : Union[str, Any]=1e-5 , a : Optional[Any]=True , a : str=3_19_96 , a : List[str]=3_19_99 , a : str=0.1 , a : Union[str, Any]=0.0 , **a : Optional[Any] , ): """simple docstring""" super().__init__(bos_token_id=a , eos_token_id=a , **a ) __lowerCamelCase = vocab_size __lowerCamelCase = max_position_embeddings __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_multiple_size __lowerCamelCase = hidden_act __lowerCamelCase = rotary_pct __lowerCamelCase = rotary_emb_base __lowerCamelCase = initializer_range __lowerCamelCase = layer_norm_eps __lowerCamelCase = use_cache __lowerCamelCase = attention_dropout __lowerCamelCase = hidden_dropout
67
0
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices a_ = logging.get_logger(__name__) a_ = { 'facebook/convnextv2-tiny-1k-224': 'https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json', } class _lowercase ( UpperCAmelCase__ , UpperCAmelCase__ ): lowercase = "convnextv2" def __init__( self : int , snake_case : Dict=3 , snake_case : List[str]=4 , snake_case : str=4 , snake_case : Dict=None , snake_case : List[str]=None , snake_case : Tuple="gelu" , snake_case : Tuple=0.02 , snake_case : Dict=1e-12 , snake_case : Any=0.0 , snake_case : List[str]=2_2_4 , snake_case : Optional[Any]=None , snake_case : int=None , **snake_case : Any , ) -> Optional[int]: """simple docstring""" super().__init__(**snake_case ) UpperCamelCase_ : str = num_channels UpperCamelCase_ : Optional[Any] = patch_size UpperCamelCase_ : Optional[Any] = num_stages UpperCamelCase_ : str = [9_6, 1_9_2, 3_8_4, 7_6_8] if hidden_sizes is None else hidden_sizes UpperCamelCase_ : Any = [3, 3, 9, 3] if depths is None else depths UpperCamelCase_ : Optional[Any] = hidden_act UpperCamelCase_ : str = initializer_range UpperCamelCase_ : Union[str, Any] = layer_norm_eps UpperCamelCase_ : Optional[int] = drop_path_rate UpperCamelCase_ : Union[str, Any] = image_size UpperCamelCase_ : Optional[int] = ['stem'] + [f"stage{idx}" for idx in range(1 , len(self.depths ) + 1 )] UpperCamelCase_, UpperCamelCase_ : str = get_aligned_output_features_output_indices( out_features=snake_case , out_indices=snake_case , stage_names=self.stage_names )
175
'''simple docstring''' def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> int: while second != 0: __lowerCamelCase = first & second first ^= second __lowerCamelCase = c << 1 return first if __name__ == "__main__": import doctest doctest.testmod() __UpperCAmelCase =int(input("Enter the first number: ").strip()) __UpperCAmelCase =int(input("Enter the second number: ").strip()) print(f'{add(first, second) = }')
67
0
import json import os import unittest from transformers.models.roc_bert.tokenization_roc_bert import ( VOCAB_FILES_NAMES, RoCBertBasicTokenizer, RoCBertTokenizer, RoCBertWordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class _SCREAMING_SNAKE_CASE ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' lowercase_ = RoCBertTokenizer lowercase_ = None lowercase_ = False lowercase_ = True lowercase_ = filter_non_english def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Tuple: '''simple docstring''' super().setUp() lowerCamelCase__: Optional[int] =["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"] lowerCamelCase__: int ={} lowerCamelCase__: int ={} for i, value in enumerate(UpperCAmelCase_): lowerCamelCase__: Union[str, Any] =i lowerCamelCase__: List[Any] =i lowerCamelCase__: Dict =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"]) lowerCamelCase__: List[str] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"]) lowerCamelCase__: str =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"]) with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) with open(self.word_shape_file , "w" , encoding="utf-8") as word_shape_writer: json.dump(UpperCAmelCase_ , UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_) with open(self.word_pronunciation_file , "w" , encoding="utf-8") as word_pronunciation_writer: json.dump(UpperCAmelCase_ , UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Tuple) ->str: '''simple docstring''' lowerCamelCase__: Union[str, Any] =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file) lowerCamelCase__: Optional[int] =tokenizer.tokenize("你好[SEP]你是谁") 
self.assertListEqual(UpperCAmelCase_ , ["你", "好", "[SEP]", "你", "是", "谁"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , [5, 6, 2, 5, 7, 8]) self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase_) , [5, 6, 2, 5, 7, 8]) self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase_) , [5, 6, 2, 5, 7, 8]) def SCREAMING_SNAKE_CASE_ (self : int) ->List[Any]: '''simple docstring''' lowerCamelCase__: Union[str, Any] =RoCBertBasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz") , ["ah", "\u535A", "\u63A8", "zz"]) def SCREAMING_SNAKE_CASE_ (self : List[str]) ->List[Any]: '''simple docstring''' lowerCamelCase__: Dict =RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase_) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? ") , ["hello", "!", "how", "are", "you", "?"]) self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"]) def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->int: '''simple docstring''' lowerCamelCase__: str =RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase_ , strip_accents=UpperCAmelCase_) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hällo", "!", "how", "are", "you", "?"]) self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["h\u00E9llo"]) def SCREAMING_SNAKE_CASE_ (self : str) ->Union[str, Any]: '''simple docstring''' lowerCamelCase__: List[str] =RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase_ , strip_accents=UpperCAmelCase_) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hallo", "!", "how", "are", "you", "?"]) self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"]) def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Tuple: '''simple docstring''' lowerCamelCase__: Union[str, Any] =RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase_) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? 
") , ["hallo", "!", "how", "are", "you", "?"]) self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"]) def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->str: '''simple docstring''' lowerCamelCase__: List[str] =RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase_) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? ") , ["HeLLo", "!", "how", "Are", "yoU", "?"]) def SCREAMING_SNAKE_CASE_ (self : List[str]) ->List[str]: '''simple docstring''' lowerCamelCase__: Optional[int] =RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase_ , strip_accents=UpperCAmelCase_) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["HäLLo", "!", "how", "Are", "yoU", "?"]) def SCREAMING_SNAKE_CASE_ (self : Any) ->int: '''simple docstring''' lowerCamelCase__: Optional[int] =RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase_ , strip_accents=UpperCAmelCase_) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["HaLLo", "!", "how", "Are", "yoU", "?"]) def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Union[str, Any]: '''simple docstring''' lowerCamelCase__: List[Any] =RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase_ , never_split=["[UNK]"]) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? 
[UNK]") , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]) def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Any: '''simple docstring''' lowerCamelCase__: Tuple =["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] lowerCamelCase__: Tuple ={} for i, token in enumerate(UpperCAmelCase_): lowerCamelCase__: List[Any] =i lowerCamelCase__: Optional[Any] =RoCBertWordpieceTokenizer(vocab=UpperCAmelCase_ , unk_token="[UNK]") self.assertListEqual(tokenizer.tokenize("") , []) self.assertListEqual(tokenizer.tokenize("unwanted running") , ["un", "##want", "##ed", "runn", "##ing"]) self.assertListEqual(tokenizer.tokenize("unwantedX running") , ["[UNK]", "runn", "##ing"]) def SCREAMING_SNAKE_CASE_ (self : Dict) ->Tuple: '''simple docstring''' self.assertTrue(_is_whitespace(" ")) self.assertTrue(_is_whitespace("\t")) self.assertTrue(_is_whitespace("\r")) self.assertTrue(_is_whitespace("\n")) self.assertTrue(_is_whitespace("\u00A0")) self.assertFalse(_is_whitespace("A")) self.assertFalse(_is_whitespace("-")) def SCREAMING_SNAKE_CASE_ (self : Any) ->int: '''simple docstring''' self.assertTrue(_is_control("\u0005")) self.assertFalse(_is_control("A")) self.assertFalse(_is_control(" ")) self.assertFalse(_is_control("\t")) self.assertFalse(_is_control("\r")) def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Dict: '''simple docstring''' self.assertTrue(_is_punctuation("-")) self.assertTrue(_is_punctuation("$")) self.assertTrue(_is_punctuation("`")) self.assertTrue(_is_punctuation(".")) self.assertFalse(_is_punctuation("A")) self.assertFalse(_is_punctuation(" ")) def SCREAMING_SNAKE_CASE_ (self : str) ->Any: '''simple docstring''' lowerCamelCase__: List[str] =self.get_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(UpperCAmelCase_) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]]) if self.test_rust_tokenizer: lowerCamelCase__: Optional[int] 
=self.get_rust_tokenizer() self.assertListEqual( [rust_tokenizer.tokenize(UpperCAmelCase_) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]]) def SCREAMING_SNAKE_CASE_ (self : Dict) ->Union[str, Any]: '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""): lowerCamelCase__: str =self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_) lowerCamelCase__: Optional[int] =F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence.""" lowerCamelCase__: Optional[Any] =tokenizer_r.encode_plus( UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , ) lowerCamelCase__: int =tokenizer_r.do_lower_case if hasattr(UpperCAmelCase_ , "do_lower_case") else False lowerCamelCase__: Tuple =( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), "##ï"), ((6, 8), "##ve"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "Allen"), ((21, 23), "##NL"), ((23, 24), "##P"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "allen"), ((21, 23), "##nl"), ((23, 24), "##p"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])) self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"]) def SCREAMING_SNAKE_CASE_ (self : Tuple) ->List[str]: '''simple docstring''' lowerCamelCase__: Optional[Any] =["的", "人", "有"] lowerCamelCase__: int ="".join(UpperCAmelCase_) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} 
({pretrained_name})"""): lowerCamelCase__: int =True lowerCamelCase__: str =self.tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_) lowerCamelCase__: List[str] =self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_) lowerCamelCase__: Optional[int] =tokenizer_p.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_) lowerCamelCase__: Dict =tokenizer_r.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_) lowerCamelCase__: int =tokenizer_r.convert_ids_to_tokens(UpperCAmelCase_) lowerCamelCase__: Union[str, Any] =tokenizer_p.convert_ids_to_tokens(UpperCAmelCase_) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: Optional[Any] =False lowerCamelCase__: Union[str, Any] =self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_) lowerCamelCase__: Optional[Any] =self.tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_) lowerCamelCase__: Optional[int] =tokenizer_r.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_) lowerCamelCase__: List[str] =tokenizer_p.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_) lowerCamelCase__: int =tokenizer_r.convert_ids_to_tokens(UpperCAmelCase_) lowerCamelCase__: Any =tokenizer_p.convert_ids_to_tokens(UpperCAmelCase_) # it is expected that only the first Chinese character is not preceded by "##". 
lowerCamelCase__: Tuple =[ F"""##{token}""" if idx != 0 else token for idx, token in enumerate(UpperCAmelCase_) ] self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) @slow def SCREAMING_SNAKE_CASE_ (self : Dict) ->Tuple: '''simple docstring''' lowerCamelCase__: Dict =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file) lowerCamelCase__: List[Any] =tokenizer.encode("你好" , add_special_tokens=UpperCAmelCase_) lowerCamelCase__: Dict =tokenizer.encode("你是谁" , add_special_tokens=UpperCAmelCase_) lowerCamelCase__: str =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_) lowerCamelCase__: Optional[int] =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_) assert encoded_sentence == [1] + text + [2] assert encoded_pair == [1] + text + [2] + text_a + [2] def SCREAMING_SNAKE_CASE_ (self : str) ->int: '''simple docstring''' lowerCamelCase__: Dict =self.get_tokenizers(do_lower_case=UpperCAmelCase_) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}"""): lowerCamelCase__: Union[str, Any] ="你好,你是谁" lowerCamelCase__: Union[str, Any] =tokenizer.tokenize(UpperCAmelCase_) lowerCamelCase__: Dict =tokenizer.convert_tokens_to_ids(UpperCAmelCase_) lowerCamelCase__: Optional[int] =tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase_) lowerCamelCase__: Union[str, Any] =tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase_) lowerCamelCase__: Optional[Any] =tokenizer.prepare_for_model( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_) lowerCamelCase__: Dict =tokenizer.encode_plus(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_)
10
"""Lazy-import shim for the Time Series Transformer model family.

Only type-checkers import the submodules eagerly; at runtime the module is
replaced by a ``_LazyModule`` that loads each submodule on first access.
"""

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Submodule name -> names it exports; consumed by _LazyModule below.
# (Restored name: the _LazyModule call at the bottom references
# ``_import_structure``, but the original block bound the dict to a
# different name and then clobbered it with the modeling list.)
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is optional: without it the modeling classes simply are not exposed.
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )
else:
    import sys

    # Replace this module object with the lazy proxy.  (The original block
    # imported ``sys`` but assigned the proxy to a dead local instead of
    # ``sys.modules[__name__]``, leaving the import dead and the module eager.)
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
67
0
from datetime import datetime

import requests


def download_video(url: str) -> bytes:
    """Download an Instagram video/IGTV clip via the downloadgram.net API.

    The API answers with a JSON list whose first entry carries the direct
    source URL of the MP4 file; that file's raw bytes are returned.

    Parameters
    ----------
    url : str
        Public URL of the Instagram video/IGTV post.

    Returns
    -------
    bytes
        Raw MP4 payload of the video.
    """
    # Fixes vs. the original: the function was named ``_a`` while
    # ``__main__`` called ``download_video``; the body read undefined
    # names (``base_url``, ``url``, ``UpperCamelCase__``); and the
    # annotation ``Optional[int]`` was wrong for a URL string.
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
322
'''simple docstring''' import warnings from pathlib import Path from typing import List, Tuple, Union import fire from torch import nn from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel from transformers.utils import logging __UpperCAmelCase =logging.get_logger(__name__) def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> None: __lowerCamelCase = nn.ModuleList([src_layers[i] for i in layers_to_copy] ) assert len(UpperCamelCase__ ) == len(UpperCamelCase__ ), f"""{len(UpperCamelCase__ )} != {len(UpperCamelCase__ )}""" dest_layers.load_state_dict(layers_to_copy.state_dict() ) __UpperCAmelCase ={ # maps num layers in teacher -> num_layers in student -> which teacher layers to copy. # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP 1_2: { 1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher 2: [0, 6], 3: [0, 6, 1_1], 4: [0, 4, 8, 1_1], 6: [0, 2, 4, 7, 9, 1_1], 9: [0, 1, 2, 4, 5, 7, 9, 1_0, 1_1], 1_2: list(range(1_2)), }, 1_6: { # maps num layers in student -> which teacher layers to copy 1: [0], 2: [0, 1_5], 3: [0, 8, 1_5], 4: [0, 5, 1_0, 1_5], 6: [0, 3, 6, 9, 1_2, 1_5], 8: [0, 2, 4, 6, 8, 1_0, 1_2, 1_5], 9: [0, 1, 3, 5, 7, 9, 1_1, 1_3, 1_5], 1_2: [0, 1, 2, 3, 4, 5, 6, 7, 9, 1_1, 1_3, 1_5], 1_6: list(range(1_6)), }, 6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))}, } __UpperCAmelCase ={ # maps num layers in student -> which teacher layers to copy. 
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]}, 1_2: {1: [1_1], 2: [5, 1_1], 3: [3, 7, 1_1], 6: [1, 3, 5, 8, 1_0, 1_1]}, 1_6: {1: [1_5], 4: [4, 9, 1_2, 1_5], 8: [1, 3, 5, 7, 9, 1_1, 1_3, 1_5]}, } def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]: try: __lowerCamelCase = LAYERS_TO_COPY[n_teacher][n_student] return val except KeyError: if n_student != n_teacher: warnings.warn( f"""no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first""" f""" {n_student}""" ) return list(range(UpperCamelCase__ ) ) def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> List[int]: if n_student > n_teacher: raise ValueError(f"""Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}""" ) elif n_teacher == n_student: return list(range(UpperCamelCase__ ) ) elif n_student == 1: return [n_teacher - 1] else: return LAYERS_TO_SUPERVISE[n_teacher][n_student] def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ = "student" , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__=False , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ , ) -> Tuple[PreTrainedModel, List[int], List[int]]: __lowerCamelCase = '''encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.''' assert (e is not None) or (d is not None), _msg if isinstance(UpperCamelCase__ , UpperCamelCase__ ): AutoTokenizer.from_pretrained(UpperCamelCase__ ).save_pretrained(UpperCamelCase__ ) # purely for convenience __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase__ ).eval() else: assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), f"""teacher must be a model or string got type {type(UpperCamelCase__ )}""" __lowerCamelCase = teacher.config.to_diff_dict() try: __lowerCamelCase , __lowerCamelCase = teacher.config.encoder_layers, teacher.config.decoder_layers if e is None: __lowerCamelCase = teacher_e if d is None: 
__lowerCamelCase = teacher_d init_kwargs.update({'''encoder_layers''': e, '''decoder_layers''': d} ) except AttributeError: # T5 if hasattr(teacher.config , '''num_encoder_layers''' ): __lowerCamelCase , __lowerCamelCase = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers else: __lowerCamelCase , __lowerCamelCase = teacher.config.num_layers, teacher.config.num_decoder_layers if e is None: __lowerCamelCase = teacher_e if d is None: __lowerCamelCase = teacher_d if hasattr(teacher.config , '''num_encoder_layers''' ): init_kwargs.update({'''num_encoder_layers''': e, '''num_decoder_layers''': d} ) else: init_kwargs.update({'''num_layers''': e, '''num_decoder_layers''': d} ) # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs init_kwargs.update(UpperCamelCase__ ) # Copy weights __lowerCamelCase = teacher.config_class(**UpperCamelCase__ ) __lowerCamelCase = AutoModelForSeqaSeqLM.from_config(UpperCamelCase__ ) # Start by copying the full teacher state dict this will copy the first N teacher layers to the student. __lowerCamelCase = student.load_state_dict(teacher.state_dict() , strict=UpperCamelCase__ ) assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys. if copy_first_teacher_layers: # Our copying is done. We just log and save __lowerCamelCase , __lowerCamelCase = list(range(UpperCamelCase__ ) ), list(range(UpperCamelCase__ ) ) logger.info( f"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to""" f""" {save_path}""" ) student.save_pretrained(UpperCamelCase__ ) return student, e_layers_to_copy, d_layers_to_copy # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer. 
if e_layers_to_copy is None: __lowerCamelCase = pick_layers_to_copy(UpperCamelCase__ , UpperCamelCase__ ) if d_layers_to_copy is None: __lowerCamelCase = pick_layers_to_copy(UpperCamelCase__ , UpperCamelCase__ ) try: if hasattr( UpperCamelCase__ , '''prophetnet''' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , UpperCamelCase__ ) copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , UpperCamelCase__ ) else: copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , UpperCamelCase__ ) copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , UpperCamelCase__ ) except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block copy_layers(teacher.encoder.block , student.encoder.block , UpperCamelCase__ ) copy_layers(teacher.decoder.block , student.decoder.block , UpperCamelCase__ ) logger.info( f"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}""" ) __lowerCamelCase = { '''teacher_type''': teacher.config.model_type, '''copied_encoder_layers''': e_layers_to_copy, '''copied_decoder_layers''': d_layers_to_copy, } student.save_pretrained(UpperCamelCase__ ) # Save information about copying for easier reproducibility return student, e_layers_to_copy, d_layers_to_copy if __name__ == "__main__": fire.Fire(create_student_by_copying_alternating_layers)
67
0
'''simple docstring''' import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def a__ ( lowerCAmelCase__ , lowerCAmelCase__=0.9_9_9 , lowerCAmelCase__="cosine" , ) -> Optional[int]: if alpha_transform_type == "cosine": def alpha_bar_fn(lowerCAmelCase__ ): return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(lowerCAmelCase__ ): return math.exp(t * -12.0 ) else: raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) UpperCAmelCase__ : Tuple = [] for i in range(UpperCamelCase__ ): UpperCAmelCase__ : List[str] = i / num_diffusion_timesteps UpperCAmelCase__ : Optional[int] = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(UpperCamelCase__ ) / alpha_bar_fn(UpperCamelCase__ ) , UpperCamelCase__ ) ) return torch.tensor(UpperCamelCase__ , dtype=torch.floataa ) class lowerCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ): lowerCAmelCase__ = [e.name for e in KarrasDiffusionSchedulers] lowerCAmelCase__ = 2 @register_to_config def __init__( self : Optional[Any] , _A : int = 1_000 , _A : float = 0.0_0_0_8_5 , _A : float = 0.0_1_2 , _A : str = "linear" , _A : Optional[Union[np.ndarray, List[float]]] = None , _A : str = "epsilon" , _A : Optional[bool] = False , _A : Optional[bool] = False , _A : float = 1.0 , _A : str = "linspace" , _A : int = 0 , ): '''simple docstring''' if trained_betas is not None: UpperCAmelCase__ : Tuple = torch.tensor(_A , dtype=torch.floataa ) elif beta_schedule == "linear": UpperCAmelCase__ : List[str] = torch.linspace(_A , _A , _A , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. 
UpperCAmelCase__ : Optional[Any] = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , _A , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule UpperCAmelCase__ : Tuple = betas_for_alpha_bar(_A , alpha_transform_type='''cosine''' ) elif beta_schedule == "exp": UpperCAmelCase__ : Dict = betas_for_alpha_bar(_A , alpha_transform_type='''exp''' ) else: raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" ) UpperCAmelCase__ : List[str] = 1.0 - self.betas UpperCAmelCase__ : Tuple = torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(_A , _A , _A ) UpperCAmelCase__ : Optional[int] = use_karras_sigmas def lowercase_ ( self : Dict , _A : Optional[Any] , _A : List[Any]=None ): '''simple docstring''' if schedule_timesteps is None: UpperCAmelCase__ : Union[str, Any] = self.timesteps UpperCAmelCase__ : int = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: UpperCAmelCase__ : Dict = 1 if len(_A ) > 1 else 0 else: UpperCAmelCase__ : Optional[Any] = timestep.cpu().item() if torch.is_tensor(_A ) else timestep UpperCAmelCase__ : Optional[int] = self._index_counter[timestep_int] return indices[pos].item() @property def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def lowercase_ ( self : List[Any] , _A : torch.FloatTensor , _A : Union[float, torch.FloatTensor] , ): '''simple docstring''' UpperCAmelCase__ : int = self.index_for_timestep(_A ) UpperCAmelCase__ : Optional[int] = self.sigmas[step_index] UpperCAmelCase__ : Tuple = sample / ((sigma**2 + 1) ** 0.5) return sample def lowercase_ ( self : Any , _A : int , _A : Union[str, torch.device] = None , _A : Optional[int] = None , ): '''simple docstring''' UpperCAmelCase__ : Tuple = num_inference_steps UpperCAmelCase__ : Dict = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. 
of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": UpperCAmelCase__ : str = np.linspace(0 , num_train_timesteps - 1 , _A , dtype=_A )[::-1].copy() elif self.config.timestep_spacing == "leading": UpperCAmelCase__ : Union[str, Any] = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 UpperCAmelCase__ : Dict = (np.arange(0 , _A ) * step_ratio).round()[::-1].copy().astype(_A ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": UpperCAmelCase__ : List[str] = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 UpperCAmelCase__ : Union[str, Any] = (np.arange(_A , 0 , -step_ratio )).round().copy().astype(_A ) timesteps -= 1 else: raise ValueError( f"""{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" ) UpperCAmelCase__ : int = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) UpperCAmelCase__ : Tuple = np.log(_A ) UpperCAmelCase__ : Optional[int] = np.interp(_A , np.arange(0 , len(_A ) ) , _A ) if self.config.use_karras_sigmas: UpperCAmelCase__ : Union[str, Any] = self._convert_to_karras(in_sigmas=_A , num_inference_steps=self.num_inference_steps ) UpperCAmelCase__ : Any = np.array([self._sigma_to_t(_A , _A ) for sigma in sigmas] ) UpperCAmelCase__ : Optional[int] = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) UpperCAmelCase__ : Dict = torch.from_numpy(_A ).to(device=_A ) UpperCAmelCase__ : str = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] ) UpperCAmelCase__ : Tuple = torch.from_numpy(_A ) UpperCAmelCase__ : int = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] ) if str(_A ).startswith('''mps''' ): # mps does not support float64 UpperCAmelCase__ : Optional[int] = timesteps.to(_A , dtype=torch.floataa ) else: UpperCAmelCase__ : Union[str, Any] = timesteps.to(device=_A ) # empty dt and derivative UpperCAmelCase__ : Dict = None UpperCAmelCase__ : Optional[Any] = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter UpperCAmelCase__ : List[str] = defaultdict(_A ) def lowercase_ ( self : int , _A : Any , _A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : str = np.log(_A ) # get distribution UpperCAmelCase__ : List[str] = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range UpperCAmelCase__ : List[str] = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 ) UpperCAmelCase__ : List[str] = low_idx + 1 UpperCAmelCase__ : Any = log_sigmas[low_idx] UpperCAmelCase__ : str = log_sigmas[high_idx] # interpolate sigmas UpperCAmelCase__ : List[Any] = (low - log_sigma) / (low - high) UpperCAmelCase__ : Any = np.clip(_A , 0 , 1 ) # transform 
interpolation to time range UpperCAmelCase__ : Any = (1 - w) * low_idx + w * high_idx UpperCAmelCase__ : int = t.reshape(sigma.shape ) return t def lowercase_ ( self : Optional[Any] , _A : torch.FloatTensor , _A : Dict ): '''simple docstring''' UpperCAmelCase__ : int = in_sigmas[-1].item() UpperCAmelCase__ : Tuple = in_sigmas[0].item() UpperCAmelCase__ : Tuple = 7.0 # 7.0 is the value used in the paper UpperCAmelCase__ : Optional[Any] = np.linspace(0 , 1 , _A ) UpperCAmelCase__ : List[str] = sigma_min ** (1 / rho) UpperCAmelCase__ : List[str] = sigma_max ** (1 / rho) UpperCAmelCase__ : Dict = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas @property def lowercase_ ( self : Tuple ): '''simple docstring''' return self.dt is None def lowercase_ ( self : Optional[Any] , _A : Union[torch.FloatTensor, np.ndarray] , _A : Union[float, torch.FloatTensor] , _A : Union[torch.FloatTensor, np.ndarray] , _A : bool = True , ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.index_for_timestep(_A ) # advance index counter by 1 UpperCAmelCase__ : Any = timestep.cpu().item() if torch.is_tensor(_A ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: UpperCAmelCase__ : Optional[Any] = self.sigmas[step_index] UpperCAmelCase__ : Union[str, Any] = self.sigmas[step_index + 1] else: # 2nd order / Heun's method UpperCAmelCase__ : Any = self.sigmas[step_index - 1] UpperCAmelCase__ : List[Any] = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API UpperCAmelCase__ : Tuple = 0 UpperCAmelCase__ : str = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": UpperCAmelCase__ : str = sigma_hat if self.state_in_first_order else sigma_next UpperCAmelCase__ : Tuple = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": UpperCAmelCase__ : Union[str, Any] = sigma_hat if self.state_in_first_order else sigma_next UpperCAmelCase__ : List[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": UpperCAmelCase__ : List[Any] = model_output else: raise ValueError( f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" ) if self.config.clip_sample: UpperCAmelCase__ : Optional[Any] = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order UpperCAmelCase__ : Dict = (sample - pred_original_sample) / sigma_hat # 3. delta timestep UpperCAmelCase__ : Union[str, Any] = sigma_next - sigma_hat # store for 2nd order step UpperCAmelCase__ : Any = derivative UpperCAmelCase__ : List[str] = dt UpperCAmelCase__ : Optional[Any] = sample else: # 2. 2nd order / Heun's method UpperCAmelCase__ : List[str] = (sample - pred_original_sample) / sigma_next UpperCAmelCase__ : Union[str, Any] = (self.prev_derivative + derivative) / 2 # 3. 
take prev timestep & sample UpperCAmelCase__ : List[str] = self.dt UpperCAmelCase__ : int = self.sample # free dt and derivative # Note, this puts the scheduler in "first order mode" UpperCAmelCase__ : List[Any] = None UpperCAmelCase__ : Optional[int] = None UpperCAmelCase__ : int = None UpperCAmelCase__ : Dict = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=_A ) def lowercase_ ( self : Tuple , _A : torch.FloatTensor , _A : torch.FloatTensor , _A : torch.FloatTensor , ): '''simple docstring''' UpperCAmelCase__ : List[str] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(_A ): # mps does not support float64 UpperCAmelCase__ : Dict = self.timesteps.to(original_samples.device , dtype=torch.floataa ) UpperCAmelCase__ : Any = timesteps.to(original_samples.device , dtype=torch.floataa ) else: UpperCAmelCase__ : Union[str, Any] = self.timesteps.to(original_samples.device ) UpperCAmelCase__ : Union[str, Any] = timesteps.to(original_samples.device ) UpperCAmelCase__ : List[Any] = [self.index_for_timestep(_A , _A ) for t in timesteps] UpperCAmelCase__ : Union[str, Any] = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): UpperCAmelCase__ : Optional[int] = sigma.unsqueeze(-1 ) UpperCAmelCase__ : Any = original_samples + noise * sigma return noisy_samples def __len__( self : Tuple ): '''simple docstring''' return self.config.num_train_timesteps
181
"""Tests for the in-graph TF GPT-2 tokenizer (parity with the Python tokenizer,
graph compilation, SavedModel round-trip, config round-trip and padding)."""

import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_keras_nlp_available():
    from transformers.models.gpta import TFGPTaTokenizer


# Restored names: these constants were assigned under a single placeholder name
# but are referenced below as TOKENIZER_CHECKPOINTS / TINY_MODEL_CHECKPOINT.
TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"


if is_tf_available():

    class ModelToSave(tf.Module):
        """Tiny tokenizer + LM module used to exercise tf.saved_model round-trips."""

        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            # Build an untrained model from the tiny checkpoint's config only.
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPTaLMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            # Tokenize inside the graph so the SavedModel is text-in, logits-out.
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs


@require_tf
@require_keras_nlp
class GPTTokenizationTest(unittest.TestCase):
    """Compare the TF in-graph tokenizer against the reference Python tokenizer."""

    def setUp(self):
        super().setUp()
        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            # Compiling must not change the tokenizer's output.
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
            loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs

            config = tf_tokenizer.get_config()
            model_from_config = TFGPTaTokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 12_31_23

            for max_length in [3, 5, 10_24]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)
                out_length = out["input_ids"].numpy().shape[1]
                assert out_length == max_length
67
0
"""DeeBERT: BERT with early-exit "highway" branches after every layer.

Contains the entropy helper, the early-exit encoder, and the base model.
"""

import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
    BERT_INPUTS_DOCSTRING,
    BERT_START_DOCSTRING,
    BertEmbeddings,
    BertLayer,
    BertPooler,
    BertPreTrainedModel,
)


def entropy(x):
    """Calculate the entropy of a pre-softmax logit tensor, row-wise (dim=1)."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A


class DeeBertEncoder(nn.Module):
    """BERT encoder with one highway (early-exit classifier) per layer."""

    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        # -1 disables early exit for a layer (entropy is never negative).
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        """Set the per-layer exit threshold; a scalar is broadcast to all layers."""
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        """Copy the main model's pooler weights into every highway pooler."""
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                # Early exit at inference time: signal via exception so callers can
                # unwind out of the remaining layers immediately.
                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits


@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]

        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10_000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[1:]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits


class HighwayException(Exception):
    """Control-flow exception raised by DeeBertEncoder to exit early at inference."""

    def __init__(self, message, exit_layer):
        self.message = message  # the early-exit outputs tuple
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
    """A per-layer early-exit head: pooler + dropout + linear classifier."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        return logits, pooled_output


@add_start_docstrings(
    'Bert Model (with early exiting - DeeBERT) with a classifier on top,\n    also takes care of multi-layer training. ',
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # The encoder exited early: e.message carries the exit's outputs tuple.
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs  # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
75
"""Fast integer exponentiation by squaring, with negative-exponent support."""


def actual_power(a: int, b: int) -> int:
    """Return a**b for b >= 0 using exponentiation by squaring.

    The half-power is computed once per level (O(log b) multiplications);
    the previous version recursed twice per level, costing O(b) calls.
    """
    if b == 0:
        return 1
    half = actual_power(a, b // 2)
    if (b % 2) == 0:
        return half * half
    return a * half * half


def power(a: int, b: int) -> float:
    """Return a**b, handling negative exponents via 1 / a**(-b).

    Raises ZeroDivisionError for power(0, b) with b < 0, like the ** operator.
    """
    if b < 0:
        # Pass the positive exponent explicitly instead of relying on
        # int()-truncation of a negative b inside actual_power.
        return 1 / actual_power(a, -b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
67
0
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}


class CamembertConfig(PretrainedConfig):
    """Configuration class for CamemBERT models (RoBERTa-style architecture).

    Defaults reproduce a BERT-base-sized model; token ids default to the
    RoBERTa convention (pad=1, bos=0, eos=2).
    """

    model_type = "camembert"

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
182
"""Multi-process-aware logging utilities for distributed runs."""

import logging
import os

from .state import PartialState


class MultiProcessAdapter(logging.LoggerAdapter):
    """LoggerAdapter that only emits on the main process by default.

    Pass ``main_process_only=False`` to log from every process, or
    ``in_order=True`` to have each process log in rank order.
    """

    @staticmethod
    def _should_log(main_process_only):
        # Log when not restricted to the main process, or when we ARE the main process.
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                # Each rank logs in turn; wait_for_everyone() serializes the output.
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name, log_level=None):
    """Return a MultiProcessAdapter-wrapped logger.

    Falls back to the ACCELERATE_LOG_LEVEL environment variable when
    ``log_level`` is not given.
    """
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
67
0
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    """Parse TensorFlowBenchmarkArguments from the CLI and run the benchmark.

    The arguments are parsed a second time inside try/except so that
    deprecated ``--no_*`` flags produce a helpful migration message.
    """
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        # NOTE(security): eval() of the argparse error tail. Input is local CLI
        # text, not untrusted remote data, but ast.literal_eval would be safer.
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
68
def triangle_number_generator():
    """Yield triangle numbers t_n = n*(n+1)/2 for n = 1 .. 999_999."""
    for n in range(1, 1_0_0_0_0_0_0):
        yield n * (n + 1) // 2


def count_divisors(n):
    """Count the divisors of n via trial-division prime factorisation.

    The divisor count is the product of (multiplicity + 1) over all prime
    factors; any remainder > 1 after the sqrt(n) sweep is itself prime.
    """
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution():
    """Return the first triangle number with more than 500 divisors (Euler 12)."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 5_0_0)


if __name__ == "__main__":
    print(solution())
68
1
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device


# Disable TF32 matmuls so reference slices reproduce exactly.
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
68
import io
import json
import unittest

from parameterized import parameterized

from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device

from utils import calculate_bleu


filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    """Slow BLEU regression tests for the facebook/wmt19-* FSMT checkpoints."""

    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # Thresholds are conservative lower bounds for each language pair.
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
68
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    """Configuration for Switch Transformers (T5 with mixture-of-experts layers)."""

    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        # NOTE(review): `and` binds tighter than `or` here, so this reads as
        # `(len > 1 and not gated) or len > 2` — presumably intentional (kept
        # as upstream), but worth confirming.
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
68
def hamming(n_element: int) -> list:
    """Return the first ``n_element`` Hamming numbers (2^i * 3^j * 5^k), sorted.

    Uses the classic three-pointer merge: each pointer tracks the smallest
    element whose multiple (by 2, 3 or 5) has not yet been emitted.

    Raises ValueError if n_element < 1.
    """
    n_element = int(n_element)
    if n_element < 1:
        raise ValueError("a should be a positive number")

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        # Advance each pointer past multiples already in the list.
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
68
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Lazily-loaded public API of the GroupViT model family; modeling entries are
# only registered when the corresponding framework is installed.
_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
68
import copy
import random

from transformers import CLIPTokenizer


class a__(CLIPTokenizer):
    """CLIP tokenizer that can expand a placeholder token into several learned vectors.

    ``token_map`` maps a user-facing placeholder (e.g. ``"<cat>"``) to the list of
    internal vocabulary tokens (``"<cat>_0"``, ``"<cat>_1"``, ...) that were added.

    Fix: every method previously shared one mangled name, so only the last
    definition survived on the class; method names are restored from the
    internal call sites (``self.try_adding_tokens``,
    ``self.replace_placeholder_tokens_in_text``), and locals that were bound to
    throwaway names but read under their real names are re-bound correctly.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        """Add ``placeholder_token`` to the vocab; raise if it already exists."""
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        """Register ``placeholder_token`` backed by ``num_vec_per_token`` vocab tokens.

        NOTE(review): this method was never referenced by name inside the file, so
        ``add_placeholder_tokens`` is a reconstructed name — confirm against callers.
        """
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}keep placeholder tokens independent"
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        """Expand each registered placeholder in ``text`` into its vector tokens."""
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                # keep only a prefix of the vectors when prop_tokens_to_load < 1.0
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
68
1
import inspect
import os
import sys
import unittest

import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu


class a__(unittest.TestCase):
    """Smoke test that the accelerate test script launches on 8 TPU cores via xla_spawn.

    Fix: both methods previously shared one mangled name, so the second
    definition clobbered the first and unittest never ran a setup phase; the
    locals/attributes are restored from their read sites (``mod_file``,
    ``self.test_dir``, ``self.test_file_path``, ``distributed_args``).
    """

    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        # Path to accelerate's bundled test script, next to test_utils.
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"]
        )
        # Directory containing this test file (where xla_spawn.py is expected).
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
68
from collections import deque
from math import floor
from random import random
from time import time


class DirectedGraph:
    """Directed weighted graph stored as an adjacency dict {node: [[weight, neighbour], ...]}.

    Fix: both graph classes previously shared one mangled class name (the second
    shadowed the first) and every method shared one mangled method name; names
    are restored from internal references. Logic is preserved exactly.
    """

    def __init__(self):
        self.graph = {}

    def add_pair(self, u, v, w=1):
        """Add the directed edge u -> v with weight w (no duplicate edges)."""
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        # make sure the target node exists even if it has no outgoing edges
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    def remove_pair(self, u, v):
        """Remove the edge u -> v if present."""
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)

    def dfs(self, s=-2, d=-1):
        """Iterative DFS from s; stops early and returns the path-so-far if d is found."""
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(node[1])
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        """Populate the graph with c random vertices (random count when c == -1)."""
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first traversal; returns nodes in visit order."""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        """DFS-based topological order (assumes the graph is a DAG)."""
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []
        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        """Return the set of nodes that participate in some cycle."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        # back-edge found: everything on the stack down to node[1] is in a cycle
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """Return True as soon as a back-edge is detected, else False."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin


class Graph:
    """Undirected weighted graph; every edge is stored in both directions."""

    def __init__(self):
        self.graph = {}

    def add_pair(self, u, v, w=1):
        """Add the undirected edge u <-> v with weight w (no duplicates)."""
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            self.graph[v] = [[w, u]]

    def remove_pair(self, u, v):
        """Remove the edge u <-> v (both directions) if present."""
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)
        # the other way round
        if self.graph.get(v):
            for edge in self.graph[v]:
                if edge[1] == u:
                    self.graph[v].remove(edge)

    def dfs(self, s=-2, d=-1):
        """Iterative DFS; same contract as DirectedGraph.dfs."""
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(node[1])
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin


# Backward-compatible alias: under the old mangled naming both classes shared one
# name, so that name resolved to the (second) undirected class.
a__ = Graph
68
1
import random


class a__:
    """One-time-pad-like cipher: each character becomes ``(ord(ch) + k) * k`` for a
    fresh random key ``k`` per character; the key list is required to decrypt.

    Fix: both staticmethods previously shared one mangled name (the second
    definition clobbered the first) and the driver referenced an undefined
    ``Onepad`` class name.
    """

    @staticmethod
    def encrypt(text) -> tuple[list[int], list[int]]:
        """Encrypt ``text``; returns (cipher, key) as parallel lists of ints."""
        plain = [ord(ch) for ch in text]
        cipher = []
        key = []
        for code in plain:
            k = random.randint(1, 300)
            # c = (p + k) * k  =>  p = (c - k^2) / k, recovered in decrypt()
            cipher.append((code + k) * k)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher, key) -> str:
        """Invert encrypt(): recover the plaintext from (cipher, key)."""
        plain = []
        for i in range(len(cipher)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


# Backward-compatible alias: the driver below refers to the class as ``Onepad``.
Onepad = a__


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
68
import datasets

from .evaluate import evaluate


# Fix: these three module constants were all assigned to one throwaway name, so the
# ``add_start_docstrings`` decorator below raised NameError at import time.
_CITATION = """\
@article{hendrycks2021cuad,
    title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
    author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
    journal={arXiv preprint arXiv:2103.06268},
    year={2021}
}
"""

_DESCRIPTION = """
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""

_KWARGS_DESCRIPTION = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
    predictions: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair as given in the references (see below)
        - 'prediction_text': list of possible texts for the answer, as a list of strings
        depending on a threshold on the confidence probability of each prediction.
    references: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair (see above),
        - 'answers': a Dict in the CUAD dataset format
            {
                'text': list of possible texts for the answer, as a list of strings
                'answer_start': list of start positions for the answer, as a list of ints
            }
        Note that answer_start values are not taken into account to compute the metric.
Returns:
    'exact_match': Exact match (the normalized answer exactly match the gold answer)
    'f1': The F-score of predicted tokens versus the gold answer
    'aupr': Area Under the Precision-Recall curve
    'prec_at_80_recall': Precision at 80% recall
    'prec_at_90_recall': Precision at 90% recall
Examples:
    >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> cuad_metric = datasets.load_metric("cuad")
    >>> results = cuad_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class a__(datasets.Metric):
    """CUAD metric wrapper around the official evaluation script.

    Fix: both methods previously shared one mangled name; the ``datasets.Metric``
    API dispatches to ``_info`` and ``_compute``, so those names are restored.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        # id -> list of candidate answer strings, as expected by the official script
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
68
1
from __future__ import annotations import unittest from transformers import DistilBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.distilbert.modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertModel, ) class a__ : """simple docstring""" def __init__( self , lowercase , ) -> List[Any]: '''simple docstring''' A__ = parent A__ = 13 A__ = 7 A__ = True A__ = True A__ = False A__ = True A__ = 99 A__ = 32 A__ = 2 A__ = 4 A__ = 37 A__ = "gelu" A__ = 0.1 A__ = 0.1 A__ = 512 A__ = 16 A__ = 2 A__ = 0.02 A__ = 3 A__ = 4 A__ = None def UpperCamelCase ( self ) -> str: '''simple docstring''' A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ = None if self.use_input_mask: A__ = random_attention_mask([self.batch_size, self.seq_length] ) A__ = None A__ = None A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A__ = ids_tensor([self.batch_size] , self.num_choices ) A__ = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels 
def UpperCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int: '''simple docstring''' A__ = TFDistilBertModel(config=lowercase ) A__ = {"input_ids": input_ids, "attention_mask": input_mask} A__ = model(lowercase ) A__ = [input_ids, input_mask] A__ = model(lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]: '''simple docstring''' A__ = TFDistilBertForMaskedLM(config=lowercase ) A__ = {"input_ids": input_ids, "attention_mask": input_mask} A__ = model(lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> str: '''simple docstring''' A__ = TFDistilBertForQuestionAnswering(config=lowercase ) A__ = { "input_ids": input_ids, "attention_mask": input_mask, } A__ = model(lowercase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[str]: '''simple docstring''' A__ = self.num_labels A__ = TFDistilBertForSequenceClassification(lowercase ) A__ = {"input_ids": input_ids, "attention_mask": input_mask} A__ = model(lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int: '''simple docstring''' A__ = self.num_choices A__ = TFDistilBertForMultipleChoice(lowercase ) A__ = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) ) A__ = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) ) A__ = { 
"input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, } A__ = model(lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int: '''simple docstring''' A__ = self.num_labels A__ = TFDistilBertForTokenClassification(lowercase ) A__ = {"input_ids": input_ids, "attention_mask": input_mask} A__ = model(lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' A__ = self.prepare_config_and_inputs() ((A__) , (A__) , (A__) , (A__) , (A__) , (A__)) = config_and_inputs A__ = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class a__ ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" __lowerCamelCase = ( ( TFDistilBertModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertForMultipleChoice, ) if is_tf_available() else None ) __lowerCamelCase = ( { 'feature-extraction': TFDistilBertModel, 'fill-mask': TFDistilBertForMaskedLM, 'question-answering': TFDistilBertForQuestionAnswering, 'text-classification': TFDistilBertForSequenceClassification, 'token-classification': TFDistilBertForTokenClassification, 'zero-shot': TFDistilBertForSequenceClassification, } if is_tf_available() else {} ) __lowerCamelCase = False __lowerCamelCase = False def UpperCamelCase ( self ) -> List[str]: '''simple docstring''' A__ = TFDistilBertModelTester(self ) A__ = ConfigTester(self , config_class=lowercase , dim=37 ) def UpperCamelCase ( self ) -> Dict: '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_distilbert_model(*lowercase ) def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*lowercase ) def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*lowercase ) def UpperCamelCase ( self ) -> str: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowercase ) def UpperCamelCase ( self ) -> List[str]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowercase ) def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*lowercase ) @slow def UpperCamelCase ( self ) -> str: '''simple docstring''' for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ): A__ = TFDistilBertModel.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) @require_tf class a__ ( unittest.TestCase ): """simple docstring""" @slow def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' A__ = TFDistilBertModel.from_pretrained("distilbert-base-uncased" ) A__ = tf.constant([[0, 1, 2, 3, 4, 5]] ) A__ = model(lowercase )[0] A__ = [1, 6, 768] self.assertEqual(output.shape , lowercase ) A__ = tf.constant( [ [ [0.1926_1885, -0.1373_2955, 0.411_9799], [0.2215_0156, -0.0742_2661, 0.3903_7204], [0.2275_6018, -0.089_6414, 0.370_1467], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1e-4 )
68
import torch
import torch.nn as nn

from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel

from ...utils import logging


logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    """Pairwise cosine similarity between two batches of embeddings.

    Fix: the function was defined under a mangled name while the class below
    calls it as ``cosine_distance``.
    """
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


class a__(PreTrainedModel):
    """CLIP-based NSFW safety checker.

    Scores image embeddings against fixed "concept" and "special care" embedding
    banks and flags images whose adjusted scores are positive.
    Fix: both forward methods previously shared one mangled name; locals that
    were bound to throwaway names but read under real names are re-bound.
    """

    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        # Frozen embedding banks: 17 generic concepts and 3 special-care concepts.
        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    # once a special-care concept fires, tighten the generic threshold
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input, images):
        """Vectorized, branch-free variant of forward() suitable for ONNX export."""
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
68
1
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile

from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version


logger = get_logger(__name__)


class a__:
    """Mock download manager used in dataset tests.

    Resolves real dataset URLs to files shipped inside a local or GitHub-hosted
    ``dummy_data.zip`` archive instead of downloading anything.

    Fix: the class attributes and all methods/properties were name-mangled (every
    method shared one name, so only the last survived); names are restored from
    internal references such as ``self.dummy_file``, ``self.dummy_zip_file``,
    ``self.local_path_to_dummy_data`` and ``self.create_dummy_data_*``.
    """

    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(
        self,
        dataset_name,
        config,
        version,
        cache_dir=None,
        use_local_dummy_data=False,
        load_existing_dummy_data=True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        # lazily downloaded/extracted on first access
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        """Fetch (locally or from GitHub) and extract the dummy data archive."""
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # NOTE(review): this property had no internally-referenced name; restored
        # as ``manual_dir`` — confirm against callers.
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        """Map a url / list / dict of urls onto paths inside the dummy data."""
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [
                    os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls
                ]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}

        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        """Yield (relative_posix_path, open_file) pairs for members of an archive."""

        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        """Yield regular file paths, skipping hidden/dunder files and directories."""
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
68
from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, 
wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
68
1
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, Features, Value
from .base import TaskTemplate


@dataclass(frozen=snake_case)
class a__(snake_case):
    """Task template for automatic speech recognition.

    Declares the expected input schema ({'audio': Audio()}) and label schema
    ({'transcription': Value('string')}) plus the default column names used
    to align a dataset's features with this task.

    NOTE(review): the five class attributes below were all collapsed to the
    same obfuscated name; originally they were distinct fields
    (task, input_schema, label_schema, audio_column, transcription_column).
    """

    __lowerCamelCase = field(default='automatic-speech-recognition', metadata={'include_in_asdict_even_if_is_default': True})
    __lowerCamelCase = Features({'audio': Audio()})
    __lowerCamelCase = Features({'transcription': Value('string')})
    __lowerCamelCase = "audio"
    __lowerCamelCase = "transcription"

    def UpperCamelCase(self, lowercase) -> Union[str, Any]:
        """Return a copy of this template whose input schema uses the dataset's
        own Audio feature for ``self.audio_column``.

        Raises ValueError when the column is missing or is not an Audio type.
        """
        if self.audio_column not in features:
            raise ValueError(F'Column {self.audio_column} is not present in features.')
        if not isinstance(features[self.audio_column], lowercase):
            raise ValueError(F'Column {self.audio_column} is not an Audio type.')
        # deepcopy because the dataclass is frozen; mutate the copy's schema
        A__ = copy.deepcopy(self)
        A__ = self.input_schema.copy()
        A__ = features[self.audio_column]
        A__ = input_schema
        return task_template

    @property
    def UpperCamelCase(self) -> Dict[str, str]:
        """Mapping from dataset column names to the task's canonical names."""
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
68
import string


def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher: print the decryption of ``message``
    under every possible shift key (0..25).

    Only uppercase ASCII letters are shifted; every other character is
    copied through unchanged.

    :param message: the (uppercase) ciphertext to attack.
    """
    # Fix: both functions in this script were named `lowerCAmelCase__`
    # (the second shadowed the first) while the call sites referenced
    # `decrypt` and `main`, raising NameError at runtime.
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    # wrap around the alphabet
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(F'Decryption using Key #{key}: {translated}')


def main() -> None:
    """Read a message from stdin, uppercase it, and brute-force decrypt it."""
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
68
1
# Fix: the two type aliases were both assigned to the obfuscated name
# `lowerCAmelCase__` while the annotations below reference `Pointad` and
# `Vectorad`; likewise all four functions were named `lowerCAmelCase__`
# while the bodies call `create_vector`, `get_ad_vectors_cross` and
# `is_zero_vector`. Binding the referenced names repairs the NameErrors.
Pointad = tuple[float, float, float]
Vectorad = tuple[float, float, float]


def create_vector(end_pointa: Pointad, end_pointa2: Pointad) -> Vectorad:
    """Return the displacement vector from ``end_pointa`` to ``end_pointa2``."""
    x = end_pointa2[0] - end_pointa[0]
    y = end_pointa2[1] - end_pointa[1]
    z = end_pointa2[2] - end_pointa[2]
    return (x, y, z)


def get_ad_vectors_cross(ab: Vectorad, ac: Vectorad) -> Vectorad:
    """Return the cross product ab x ac of two 3-D vectors."""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vectorad, accuracy: int) -> bool:
    """Return True when every component of ``vector`` rounds to 0 at
    ``accuracy`` decimal places (tolerates floating-point noise)."""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Pointad, b: Pointad, c: Pointad, accuracy: int = 10) -> bool:
    """Return True when the three points lie on one straight line.

    Points are collinear iff the cross product of the vectors AB and AC
    is (numerically) the zero vector.
    """
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_ad_vectors_cross(ab, ac), accuracy)
68
import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ = get_tests_dir("""fixtures/test_sentencepiece_bpe_char.model""") @require_sentencepiece @require_tokenizers class a__ ( snake_case , unittest.TestCase ): """simple docstring""" __lowerCamelCase = SpeechTaTokenizer __lowerCamelCase = False __lowerCamelCase = True def UpperCamelCase ( self ) -> Any: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing A__ = SpeechTaTokenizer(lowercase ) A__ = AddedToken("<mask>" , lstrip=lowercase , rstrip=lowercase ) A__ = mask_token tokenizer.add_special_tokens({"mask_token": mask_token} ) tokenizer.add_tokens(["<ctc_blank>"] ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase ( self , lowercase ) -> Union[str, Any]: '''simple docstring''' A__ = "this is a test" A__ = "this is a test" return input_text, output_text def UpperCamelCase ( self , lowercase , lowercase=False , lowercase=20 , lowercase=5 ) -> Optional[Any]: '''simple docstring''' A__ , A__ = self.get_input_output_texts(lowercase ) A__ = tokenizer.encode(lowercase , add_special_tokens=lowercase ) A__ = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase ) return text, ids def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' A__ = "<pad>" A__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase ) , lowercase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase ) , lowercase ) def UpperCamelCase ( self ) -> List[str]: '''simple docstring''' A__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-4] , "œ" 
) self.assertEqual(vocab_keys[-2] , "<mask>" ) self.assertEqual(vocab_keys[-1] , "<ctc_blank>" ) self.assertEqual(len(lowercase ) , 81 ) def UpperCamelCase ( self ) -> Dict: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def UpperCamelCase ( self ) -> Optional[int]: '''simple docstring''' A__ = self.get_tokenizers(do_lower_case=lowercase ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): A__ = tokenizer.vocab_size A__ = len(lowercase ) self.assertNotEqual(lowercase , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) A__ = ["aaaaa bbbbbb", "cccccccccdddddddd"] A__ = tokenizer.add_tokens(lowercase ) A__ = tokenizer.vocab_size A__ = len(lowercase ) self.assertNotEqual(lowercase , 0 ) self.assertEqual(lowercase , lowercase ) self.assertEqual(lowercase , len(lowercase ) ) self.assertEqual(lowercase , all_size + len(lowercase ) ) A__ = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=lowercase ) self.assertGreaterEqual(len(lowercase ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) A__ = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"} A__ = tokenizer.add_special_tokens(lowercase ) A__ = tokenizer.vocab_size A__ = len(lowercase ) self.assertNotEqual(lowercase , 0 ) self.assertEqual(lowercase , lowercase ) self.assertEqual(lowercase , len(lowercase ) ) self.assertEqual(lowercase , all_size_a + len(lowercase ) ) A__ = tokenizer.encode( ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=lowercase ) self.assertGreaterEqual(len(lowercase ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) 
self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' pass def UpperCamelCase ( self ) -> Any: '''simple docstring''' pass def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' A__ = self.get_tokenizer() A__ = tokenizer.tokenize("This is a test" ) # fmt: off self.assertListEqual(lowercase , [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(lowercase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) A__ = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( lowercase , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] ) A__ = tokenizer.convert_tokens_to_ids(lowercase ) # fmt: off self.assertListEqual(lowercase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on A__ = tokenizer.convert_ids_to_tokens(lowercase ) self.assertListEqual( lowercase , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] ) @slow def UpperCamelCase ( self ) -> int: '''simple docstring''' A__ = [ "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides " "general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, 
XLNet...) for Natural " "Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained " "models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.", "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly " "conditioning on both left and right context in all layers.", "The quick brown fox jumps over the lazy dog.", ] # fmt: off A__ = { "input_ids": [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 
15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], "attention_mask": [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase , model_name="microsoft/speecht5_asr" , revision="c5ef64c71905caeccde0e4462ef3f9077224c524" , sequences=lowercase , )
68
1
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer


lowerCAmelCase__ = logging.get_logger(__name__)

# Canonical filenames for the slow-vocab and fast-tokenizer serializations.
lowerCAmelCase__ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}

# Hub locations of the pretrained vocab/tokenizer files per checkpoint.
# NOTE(review): the two `.../openqa/aresolve/...` URLs and the embedder's
# `tokenizer.jsont` suffix look like typos ("resolve", ".json") — verify
# against the Hub before changing, since they are runtime strings.
lowerCAmelCase__ = {
    """vocab_file""": {
        """google/realm-cc-news-pretrained-embedder""": (
            """https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
        ),
        """google/realm-cc-news-pretrained-encoder""": (
            """https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
        ),
        """google/realm-cc-news-pretrained-scorer""": (
            """https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
        ),
        """google/realm-cc-news-pretrained-openqa""": (
            """https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
        ),
        """google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
        """google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
        """google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
        """google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
    },
    """tokenizer_file""": {
        """google/realm-cc-news-pretrained-embedder""": (
            """https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
        ),
        """google/realm-cc-news-pretrained-encoder""": (
            """https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
        ),
        """google/realm-cc-news-pretrained-scorer""": (
            """https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
        ),
        """google/realm-cc-news-pretrained-openqa""": (
            """https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
        ),
        """google/realm-orqa-nq-openqa""": (
            """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
        ),
        """google/realm-orqa-nq-reader""": (
            """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
        ),
        """google/realm-orqa-wq-openqa""": (
            """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
        ),
        """google/realm-orqa-wq-reader""": (
            """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
        ),
    },
}

# Maximum model input lengths (positional embedding sizes) per checkpoint.
lowerCAmelCase__ = {
    """google/realm-cc-news-pretrained-embedder""": 5_1_2,
    """google/realm-cc-news-pretrained-encoder""": 5_1_2,
    """google/realm-cc-news-pretrained-scorer""": 5_1_2,
    """google/realm-cc-news-pretrained-openqa""": 5_1_2,
    """google/realm-orqa-nq-openqa""": 5_1_2,
    """google/realm-orqa-nq-reader""": 5_1_2,
    """google/realm-orqa-wq-openqa""": 5_1_2,
    """google/realm-orqa-wq-reader""": 5_1_2,
}

# Default init kwargs per checkpoint (all REALM checkpoints lowercase input).
lowerCAmelCase__ = {
    """google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
    """google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
    """google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
    """google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
    """google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
    """google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
    """google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
    """google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}


class a__(snake_case):
    """Fast (tokenizers-backed) REALM tokenizer, paired with the slow
    ``RealmTokenizer`` declared as its fallback implementation.

    NOTE(review): the five class attributes below were collapsed to one
    obfuscated name; originally (vocab_files_names,
    pretrained_vocab_files_map, pretrained_init_configuration,
    max_model_input_sizes, slow_tokenizer_class).
    """

    __lowerCamelCase = VOCAB_FILES_NAMES
    __lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
    __lowerCamelCase = PRETRAINED_INIT_CONFIGURATION
    __lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __lowerCamelCase = RealmTokenizer

    def __init__(self, lowercase=None, lowercase=None, lowercase=True, lowercase="[UNK]", lowercase="[SEP]", lowercase="[PAD]", lowercase="[CLS]", lowercase="[MASK]", lowercase=True, lowercase=None, **lowercase, ) -> List[str]:
        """Initialize the fast tokenizer and re-sync the backend normalizer
        when the requested lowercase/strip-accents/Chinese-chars options
        differ from the serialized tokenizer state.
        """
        super().__init__(
            lowercase,
            tokenizer_file=lowercase,
            do_lower_case=lowercase,
            unk_token=lowercase,
            sep_token=lowercase,
            pad_token=lowercase,
            cls_token=lowercase,
            mask_token=lowercase,
            tokenize_chinese_chars=lowercase,
            strip_accents=lowercase,
            **lowercase,
        )
        A__ = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", lowercase) != do_lower_case
            or normalizer_state.get("strip_accents", lowercase) != strip_accents
            or normalizer_state.get("handle_chinese_chars", lowercase) != tokenize_chinese_chars
        ):
            # rebuild the backend normalizer with the requested options
            A__ = getattr(lowercase, normalizer_state.pop("type"))
            A__ = do_lower_case
            A__ = strip_accents
            A__ = tokenize_chinese_chars
            A__ = normalizer_class(**lowercase)
        A__ = do_lower_case

    def UpperCamelCase(self, lowercase, **lowercase) -> Optional[int]:
        """Encode a batch of candidate texts (optionally paired), padding each
        candidate to max length, and return a BatchEncoding whose values are
        lists of per-candidate encodings.
        """
        A__ = PaddingStrategy.MAX_LENGTH
        A__ = text
        A__ = kwargs.pop("text_pair", lowercase)
        A__ = kwargs.pop("return_tensors", lowercase)
        A__ = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }
        for idx, candidate_text in enumerate(lowercase):
            if batch_text_pair is not None:
                A__ = batch_text_pair[idx]
            else:
                A__ = None
            A__ = super().__call__(lowercase, lowercase, return_tensors=lowercase, **lowercase)
            A__ = encoded_candidates.get("input_ids")
            A__ = encoded_candidates.get("attention_mask")
            A__ = encoded_candidates.get("token_type_ids")
            if encoded_input_ids is not None:
                output_data["input_ids"].append(lowercase)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(lowercase)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(lowercase)
        # drop empty entries before building the final BatchEncoding
        A__ = {key: item for key, item in output_data.items() if len(lowercase) != 0}
        return BatchEncoding(lowercase, tensor_type=lowercase)

    def UpperCamelCase(self, lowercase, lowercase=None) -> str:
        """Build model inputs with special tokens:
        [CLS] A [SEP] or [CLS] A [SEP] B [SEP].

        NOTE(review): obfuscation collapsed token_ids_0/token_ids_1 to the
        same name here — verify against the upstream transformers source.
        """
        A__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output

    def UpperCamelCase(self, lowercase, lowercase=None) -> List[int]:
        """Return token type IDs: zeros over [CLS] A [SEP], ones over B [SEP]
        when a second sequence is given.
        """
        A__ = [self.sep_token_id]
        A__ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]

    def UpperCamelCase(self, lowercase, lowercase=None) -> Tuple[str]:
        """Save the backend tokenizer's model files and return their paths."""
        A__ = self._tokenizer.model.save(lowercase, name=lowercase)
        return tuple(lowercase)
68
# Usage:
# ./gen-card-facebook-wmt19.py

import os
from pathlib import Path


def write_model_card(model_card_dir, src_lang, tgt_lang):
    """Generate and write the README.md model card for one facebook/wmt19-*
    FSMT checkpoint.

    Fix: the function was (obfuscation-)named `lowerCAmelCase__` while the
    loop below calls `write_model_card`, raising NameError when the script
    ran.

    :param model_card_dir: directory to create and write README.md into.
    :param src_lang: source language code (e.g. "en").
    :param tgt_lang: target language code (e.g. "ru").
    """
    # Example sentences shown in the card's usage snippet.
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = F'{src_lang}-{tgt_lang}'

    readme = F'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'

    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(F'Generating {path}')
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / """model_cards"""

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    # e.g. "wmt19-ru-en" -> base="wmt19", src_lang="ru", tgt_lang="en"
    base, src_lang, tgt_lang = model_name.split("""-""")
    model_card_dir = model_cards_dir / """facebook""" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
68
1
from PIL import Image


def change_contrast(img: Image, level: int) -> Image:
    """Return a new image with contrast adjusted by ``level``.

    Args:
        img: source PIL image.
        level: contrast adjustment in [-255, 255]; 0 leaves the image
            unchanged, positive values increase contrast.

    Returns:
        A new ``Image`` with the contrast curve applied to every channel.
    """
    # Standard contrast-correction factor: maps `level` to a multiplier
    # applied around the mid-point value 128.
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        # Fundamental contrast function: scale channel value `c` around 128.
        return int(128 + factor * (c - 128))

    # Image.point applies `contrast` to every pixel channel value.
    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
68
from typing import Dict, List, Optional, Union

import numpy as np

from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy


logger = logging.get_logger(__name__)


class a__(FeatureExtractionMixin):
    """Base class for sequence (speech) feature extractors.

    Holds the feature/sampling configuration and implements generic padding
    and truncation of batches of feature sequences.
    """

    def __init__(self, feature_size, sampling_rate, padding_value, **kwargs) -> None:
        """
        Args:
            feature_size: dimensionality of each extracted feature vector.
            sampling_rate: sampling rate (Hz) the inputs are expected in.
            padding_value: value used to fill padded positions.
        """
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        # "right" (default) or "left"; which side of a sequence gets padded.
        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features,
        padding=True,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
    ) -> BatchFeature:
        """Pad (and optionally truncate) a batch of feature sequences.

        Accepts either a dict of lists or a list of dicts/``BatchFeature``;
        returns a ``BatchFeature`` in the requested tensor framework.
        """
        # If we have a list of dicts, convert it into a dict of lists so that
        # `pad` can be used intuitively on a list of `BatchFeature`.
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding.
        # NOTE(review): `self.model_input_names` is assumed to be provided by the
        # mixin / subclass — it is not defined in this class.
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        # Empty batch: nothing to pad, optionally attach an empty mask.
        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, cast them to numpy
        # arrays and rebuild them afterwards if no return_tensors is specified.
        # Note that we lose the specific device the tensor may be on for PyTorch.
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases,
            # so grab the first non-empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert the `padding` argument into a PaddingStrategy.
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation (per example)
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding (per example)
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                # Down-cast float64 to float32 — model inputs are float32.
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features,
        max_length=None,
        padding_strategy=PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of=None,
        return_attention_mask=None,
    ) -> dict:
        """Pad a single example (dict of numpy arrays) up to ``max_length``."""
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        # Round max_length up to the next multiple of `pad_to_multiple_of`.
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                # 2-D features pad along the time axis only; 1-D pad directly.
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features,
        max_length=None,
        pad_to_multiple_of=None,
        truncation=None,
    ):
        """Truncate a single example (dict of numpy arrays) down to ``max_length``."""
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        """Resolve the user-facing ``padding`` argument into a PaddingStrategy."""
        if padding is not False:
            if padding is True:
                # Default to pad to the longest sequence in the batch.
                padding_strategy = PaddingStrategy.LONGEST
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test that we have a padding value to pad with.
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
68
1
import unittest

import torch

from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow


# Disable TF32 matmuls so both training runs are bit-comparable on CUDA.
# NOTE(review): the mangled original had a bare module-level `False`; upstream
# diffusers sets this flag here — confirm against the original file.
torch.backends.cuda.matmul.allow_tf32 = False


class a__(unittest.TestCase):
    """Checks that one epoch of training driven by a DDPM scheduler is
    numerically identical to the same training driven by a DDIM scheduler
    configured identically."""

    def get_model_optimizer(self, resolution=32):
        """Build a freshly seeded UNet2D model and a plain SGD optimizer."""
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # The last batch's noisy inputs and predictions must agree between
        # the two schedulers.
        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
68
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Lazy-import structure: maps submodule name -> the public names it provides.
_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: simply skip registering the PyTorch models.
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # TensorFlow missing: skip registering the TF models.
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy so the heavy
    # framework imports only happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
68
1
# HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). 
That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. 
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#

import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean

import pandas as pd
import torch
from tqdm import tqdm

import transformers


# Sentinel used for failed runs / missing metrics.
nan = float("nan")


class Tee:
    """Duplicate everything written to stdout into a log file as well."""

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        # Delegate anything we don't implement to the real stdout.
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes (carriage-return progress lines) before logging
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))


def get_original_command(max_width=80, full_python_path=False):
    """Return the command line used to launch this script, re-quoted and
    wrapped with shell line continuations every ``max_width`` columns."""
    cmd = []
    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)


def get_base_command(args, output_dir):
    """Normalize ``args.base_cmd`` and return it as an argv list, forcing our
    own --output_dir and --overwrite_output_dir."""
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)


def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    """Run one benchmark subprocess and return the requested metrics
    (``nan`` for the target metric when the run fails)."""
    # Enable to debug everything but the run itself (fast dry-run with fake metrics).
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}


def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    """Run one variation ``repeat_times`` times and return its mean metrics."""
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    # \33[2K\r clears the tqdm progress line before printing the outcome.
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(metrics) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}


def get_versions():
    """Return a report of the software and hardware used for the benchmark."""
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return (
        f"\nDatetime    : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n"
        f"\nSoftware:\n"
        f"transformers: {transformers.__version__}\n"
        f"torch       : {torch.__version__}\n"
        f"cuda        : {torch.version.cuda}\n"
        f"python      : {platform.python_version()}\n"
        f"\nHardware:\n"
        f"{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n"
    )


def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    """Aggregate all variation results into a table and print it twice: once in
    github markdown format, once console-friendly."""
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd",
        default=None,
        type=str,
        required=True,
        help="Base cmd",
    )
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)


if __name__ == "__main__":
    main()
68
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class a__(PretrainedConfig):
    """Configuration class for the GPTNeoXJapanese model.

    Stores the architecture hyper-parameters; defaults match the
    ``abeja/gpt-neox-japanese-2.7b`` checkpoint.
    """

    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ) -> None:
        """
        Args:
            vocab_size: size of the token vocabulary.
            hidden_size: transformer hidden dimension.
            num_hidden_layers / num_attention_heads: depth and head count.
            intermediate_multiple_size: multiplier for the MLP inner dimension.
            hidden_act: activation function name.
            rotary_pct / rotary_emb_base: rotary position embedding settings.
            max_position_embeddings: maximum supported sequence length.
            initializer_range: stddev for weight initialization.
            layer_norm_eps: epsilon used by layer norms.
            use_cache: whether to return past key/values for fast decoding.
            attention_dropout / hidden_dropout: dropout probabilities.
        """
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
68
1
import unittest

from transformers import (
    MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    TextClassificationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow

from .test_pipelines_common import ANY


# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}


@is_pipeline_test
class a__(unittest.TestCase):
    """Tests for the text-classification pipeline (PT and TF backends)."""

    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    # Filter out model types whose inputs are incompatible with this pipeline.
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )

    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
68
import warnings
from functools import wraps
from typing import Callable


def lowerCAmelCase__(SCREAMING_SNAKE_CASE_: Callable) -> Callable:
    """Decorator that marks a callable as experimental API.

    Each invocation of the wrapped callable emits a ``UserWarning`` naming the
    function, then delegates to the original function with all arguments
    unchanged.

    Args:
        SCREAMING_SNAKE_CASE_: the callable to wrap.

    Returns:
        The wrapped callable (metadata preserved via ``functools.wraps``).
    """
    fn = SCREAMING_SNAKE_CASE_  # original code referenced an undefined name `fn`; bind it to the parameter

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        # BUG FIX: warnings.warn's second positional argument is the warning
        # *category*; the original passed the call's args tuple there, which
        # raised TypeError on the first invocation of any wrapped function.
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
68
1
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    """Return (timm_name, hf_name) pairs for all weights that only need renaming.

    NOTE: in the degraded source every function was defined under the same name
    (`lowerCAmelCase__`), so the internal calls below raised NameError; the
    names used here are the ones the call sites actually reference.
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )
    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each timm fused qkv projection into separate HF query/key/value tensors.

    NOTE(review): the degraded source dropped the assignment targets for the
    six slices below; the HF key names were reconstructed from the standard
    ViT layout — confirm against the current `ViTModel` state dict.
    """
    for i in range(config.num_hidden_layers):
        prefix = "" if base_model else "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    """Drop the classification head weights (in place) when converting a base model."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    """Move the value stored under key *old* to key *new* (in place)."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Download the standard COCO cats image used to sanity-check conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """Convert a timm ViT/DeiT checkpoint named *vit_name* to HF format.

    Copies weights into the corresponding HF architecture, verifies the HF
    outputs match timm within 1e-3, then saves model + image processor to
    *pytorch_dump_folder_path*.
    """
    # define default ViT configuration
    config = ViTConfig()
    base_model = False

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        # BUG FIX: keys must be converted with int(k); the degraded source
        # applied int() to the wrong variable.
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])

    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_patch16_224",
        type=str,
        help="Name of the ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
68
import os
from pathlib import Path
from unittest.mock import patch

import pytest
import zstandard as zstd

from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
    OfflineModeIsEnabled,
    cached_path,
    fsspec_get,
    fsspec_head,
    ftp_get,
    ftp_head,
    get_from_cache,
    http_get,
    http_head,
)


# NOTE: the degraded source defined every fixture/test under the same name
# (`lowerCAmelCase__`), so each definition shadowed the previous one and pytest
# collected almost nothing.  Distinct names restored below.
FILE_CONTENT = """\
Text data.
Second line of data."""
FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    """Session-scoped zstd-compressed copy of FILE_CONTENT."""
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    """FILE_CONTENT written inside the mock `tmp://` filesystem."""
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    """cached_path with extract_compressed_file=True must yield the original text."""
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    """Extraction destination must honour the configured cache/extracted dirs."""
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    """Local absolute and relative paths resolve to the file itself."""
    # absolute path
    text_file_path = str(Path(text_file).resolve())
    assert cached_path(text_file_path) == text_file
    # relative path
    text_file_path = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file_path) == text_file


def test_cached_path_missing_local(tmp_path):
    """Missing local paths raise FileNotFoundError."""
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_tmpfs(tmpfs_file):
    """get_from_cache can read from a tmp:// fsspec filesystem."""
    output_file = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_file) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


# BUG FIX: the degraded source patched HF_DATASETS_OFFLINE with an undefined
# name; the tests below only make sense with offline mode forced to True.
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
68
1
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Image
from .base import TaskTemplate


@dataclass(frozen=True)
class a__(TaskTemplate):
    """Task template describing an image-classification dataset.

    Maps a dataset's image/label columns onto the canonical ``image``/``labels``
    schema expected by image-classification pipelines.

    NOTE: the degraded source named every field ``__lowerCamelCase`` and both
    methods ``UpperCamelCase`` (so the property shadowed the alignment method),
    used undefined ``snake_case`` for the base class / ``frozen`` flag, and an
    undefined ``lowercase`` in the isinstance check.  The field and method
    names restored here are the ones the method bodies actually reference and
    the ``TaskTemplate`` base-class contract requires.
    """

    # `task` is serialized even when it equals the default so consumers can
    # always identify the template kind.
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    # ClassLabel is intentionally the class, not an instance: the concrete
    # label set is filled in by align_with_features.
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the dataset's ClassLabel.

        Raises:
            ValueError: if ``self.label_column`` is absent from *features* or is
                not a ``ClassLabel`` feature.
        """
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # dataclass is frozen, so bypass __setattr__ via __dict__
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Mapping from the dataset's column names to the canonical task columns."""
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
68
from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class a__ : """simple docstring""" __lowerCamelCase = BlenderbotSmallConfig __lowerCamelCase = {} __lowerCamelCase = 'gelu' def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=False , lowercase=99 , lowercase=32 , lowercase=2 , lowercase=4 , lowercase=37 , lowercase=0.1 , lowercase=0.1 , lowercase=20 , lowercase=2 , lowercase=1 , lowercase=0 , ) -> Any: '''simple docstring''' A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_labels A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = eos_token_id A__ = pad_token_id A__ = bos_token_id def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) A__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) A__ = tf.concat([input_ids, eos_tensor] , axis=1 ) A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , 
encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) A__ = prepare_blenderbot_small_inputs_dict(lowercase , lowercase , lowercase ) return config, inputs_dict def UpperCamelCase ( self , lowercase , lowercase ) -> str: '''simple docstring''' A__ = TFBlenderbotSmallModel(config=lowercase ).get_decoder() A__ = inputs_dict["input_ids"] A__ = input_ids[:1, :] A__ = inputs_dict["attention_mask"][:1, :] A__ = inputs_dict["head_mask"] A__ = 1 # first forward pass A__ = model(lowercase , attention_mask=lowercase , head_mask=lowercase , use_cache=lowercase ) A__ , A__ = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids A__ = ids_tensor((self.batch_size, 3) , config.vocab_size ) A__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and A__ = tf.concat([input_ids, next_tokens] , axis=-1 ) A__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) A__ = model(lowercase , attention_mask=lowercase )[0] A__ = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice A__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) A__ = output_from_no_past[:, -3:, random_slice_idx] A__ = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase , lowercase , rtol=1e-3 ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int , SCREAMING_SNAKE_CASE_: List[Any] , SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: Optional[Any]=None , SCREAMING_SNAKE_CASE_: Optional[int]=None , SCREAMING_SNAKE_CASE_: Optional[int]=None , 
SCREAMING_SNAKE_CASE_: Dict=None , SCREAMING_SNAKE_CASE_: List[str]=None , ) -> List[Any]: '''simple docstring''' if attention_mask is None: A__ = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE_ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: A__ = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: A__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: A__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: A__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class a__ ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" __lowerCamelCase = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) __lowerCamelCase = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () __lowerCamelCase = ( { 'conversational': TFBlenderbotSmallForConditionalGeneration, 'feature-extraction': TFBlenderbotSmallModel, 'summarization': TFBlenderbotSmallForConditionalGeneration, 'text2text-generation': TFBlenderbotSmallForConditionalGeneration, 'translation': TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) __lowerCamelCase = True __lowerCamelCase = False __lowerCamelCase = False def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ = TFBlenderbotSmallModelTester(self ) A__ = ConfigTester(self , config_class=lowercase ) def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' self.config_tester.run_common_tests() 
def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase ) @require_tokenizers @require_tf class a__ ( unittest.TestCase ): """simple docstring""" __lowerCamelCase = [ 'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like ' ' i\'m going to throw up.\nand why is that?' ] __lowerCamelCase = 'facebook/blenderbot_small-90M' @cached_property def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" ) @cached_property def UpperCamelCase ( self ) -> Any: '''simple docstring''' A__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def UpperCamelCase ( self ) -> int: '''simple docstring''' A__ = self.tokenizer(self.src_text , return_tensors="tf" ) A__ = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowercase , ) A__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowercase )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
68
1
import json import os import unittest from typing import Tuple from transformers import WavaVecaPhonemeCTCTokenizer from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput from transformers.testing_utils import require_phonemizer from ...test_tokenization_common import TokenizerTesterMixin @require_phonemizer class a__ ( snake_case , unittest.TestCase ): """simple docstring""" __lowerCamelCase = WavaVecaPhonemeCTCTokenizer __lowerCamelCase = False def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' super().setUp() A__ = ( "<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː " "ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː " "ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 " "oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ " "pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ " "yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ " "əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ " "ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ " "ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ " "uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. 
oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ " "ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ " "ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ " "ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4" ).split(" " ) A__ = dict(zip(lowercase , range(len(lowercase ) ) ) ) A__ = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"} A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowercase ) + "\n" ) def UpperCamelCase ( self , lowercase , lowercase=False , lowercase=20 , lowercase=5 ) -> Tuple[str, list]: '''simple docstring''' A__ = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=lowercase )) for i in range(len(lowercase ) )] A__ = list(filter(lambda lowercase : [t[0]] == tokenizer.encode(t[1] , do_phonemize=lowercase ) , lowercase ) ) if max_length is not None and len(lowercase ) > max_length: A__ = toks[:max_length] if min_length is not None and len(lowercase ) < min_length and len(lowercase ) > 0: while len(lowercase ) < min_length: A__ = toks + toks # toks_str = [t[1] for t in toks] A__ = [t[0] for t in toks] # Ensure consistency A__ = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase ) if " " not in output_txt and len(lowercase ) > 1: A__ = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowercase ) + " " + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowercase ) ) if with_prefix_space: A__ = " " + output_txt A__ = tokenizer.encode(lowercase , add_special_tokens=lowercase ) return output_txt, output_ids def UpperCamelCase ( self , **lowercase ) -> Dict: '''simple docstring''' kwargs.update(self.special_tokens_map ) return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **lowercase ) def UpperCamelCase ( self ) -> str: '''simple docstring''' 
A__ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" ) # check adding a single token tokenizer.add_tokens("xxx" ) A__ = tokenizer("m xxx ɪ" , do_phonemize=lowercase ).input_ids self.assertEqual(lowercase , [13, 392, 17] ) # xxx should be last token tokenizer.add_tokens(["aaa", "bbb", "ccc"] ) A__ = tokenizer("m aaa ɪ ccc" , do_phonemize=lowercase ).input_ids self.assertEqual(lowercase , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa A__ = tokenizer("maɪ c" , do_phonemize=lowercase ).input_ids self.assertEqual(lowercase , [3, 200] ) # mai should be <unk> (=3) def UpperCamelCase ( self ) -> Any: '''simple docstring''' A__ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" ) A__ = "Hello how are you" A__ = tokenizer.phonemize(lowercase , phonemizer_lang="en-us" ) self.assertEqual(lowercase , "h ə l oʊ h aʊ ɑːɹ j uː" ) def UpperCamelCase ( self ) -> int: '''simple docstring''' A__ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" ) A__ = "Hello how are you" A__ = tokenizer.phonemize(lowercase , phonemizer_lang="en-us" ) self.assertEqual(tokenizer(lowercase ).input_ids , tokenizer(lowercase , do_phonemize=lowercase ).input_ids ) def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' A__ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" ) A__ = "Hello how are you" A__ = tokenizer.phonemize(lowercase , phonemizer_lang="en-us" ) A__ = tokenizer.decode(tokenizer(lowercase ).input_ids ) self.assertEqual(lowercase , lowercase ) def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" ) A__ = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98], [24, 22, 5, 24, 22, 5, 77], ] A__ = tokenizer.decode(sample_ids[0] ) A__ = tokenizer.batch_decode(lowercase ) self.assertEqual(lowercase , batch_tokens[0] ) self.assertEqual(lowercase , ["k s ɾ ɾ l ɭʲ", "j 
ð s j ð s oːɹ"] ) def UpperCamelCase ( self ) -> Any: '''simple docstring''' A__ = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" ) tokenizer.add_tokens("|" ) A__ = "Hello how are you" A__ = tokenizer.phonemize(lowercase , phonemizer_lang="en-us" ) self.assertEqual(lowercase , "h ə l oʊ | h aʊ | ɑːɹ | j uː |" ) def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' A__ = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" ) tokenizer.add_tokens("|" ) A__ = "Hello how are you" A__ = tokenizer.phonemize(lowercase , phonemizer_lang="en-us" ) self.assertEqual(tokenizer(lowercase ).input_ids , tokenizer(lowercase , do_phonemize=lowercase ).input_ids ) def UpperCamelCase ( self ) -> List[str]: '''simple docstring''' A__ = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" ) tokenizer.add_tokens("|" ) # fmt: off A__ = [ [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98], [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77], ] # fmt: on # decode with word_del_token filter A__ = tokenizer.decode(sample_ids[0] ) A__ = tokenizer.batch_decode(lowercase ) self.assertEqual(lowercase , batch_tokens[0] ) self.assertEqual(lowercase , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] ) # decode with no word_del_token filter A__ = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=lowercase ) A__ = tokenizer.batch_decode(lowercase , filter_word_delimiter_token=lowercase ) self.assertEqual(lowercase , batch_tokens[0] ) self.assertEqual(lowercase , ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"] ) def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' A__ = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" ) tokenizer.add_tokens("|" ) A__ = 
"Hello how are you" A__ = tokenizer.phonemize(lowercase , phonemizer_lang="en-us" ) A__ = tokenizer.decode(tokenizer(lowercase ).input_ids , filter_word_delimiter_token=lowercase ) self.assertEqual(lowercase , lowercase ) def UpperCamelCase ( self ) -> Optional[int]: '''simple docstring''' A__ = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" ) tokenizer.add_tokens("|" ) A__ = "Hello how are you" A__ = tokenizer.phonemize(lowercase , phonemizer_lang="en-us" ) A__ = tokenizer.decode(tokenizer(lowercase ).input_ids , filter_word_delimiter_token=lowercase ) self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |" )] ).strip() , lowercase ) def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token=lowercase ) A__ = "Hello how are you" A__ = tokenizer(lowercase , phonemizer_lang="en-us" ).input_ids A__ = tokenizer(lowercase , phonemizer_lang="fr-fr" ).input_ids self.assertNotEqual(lowercase , lowercase ) A__ = tokenizer.decode(lowercase ) A__ = tokenizer.decode(lowercase ) self.assertEqual(lowercase , "h ə l oʊ h aʊ ɑːɹ j uː" ) self.assertEqual(lowercase , "ɛ l o h aʊ a ʁ j u" ) def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' A__ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" ) A__ = "Hello how Are you" A__ = "hello how are you" A__ = tokenizer(lowercase ).input_ids A__ = tokenizer(lowercase ).input_ids self.assertEqual(lowercase , lowercase ) def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' A__ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" ) tokenizer.add_tokens(["!", "?"] ) tokenizer.add_special_tokens({"cls_token": "$$$"} ) # fmt: off A__ = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394], [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394], ] # fmt: on 
A__ = tokenizer.batch_decode(lowercase ) self.assertEqual(lowercase , ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"] ) @staticmethod def UpperCamelCase ( lowercase , lowercase ) -> List[str]: '''simple docstring''' A__ = [d[key] for d in offsets] return retrieved_list def UpperCamelCase ( self ) -> int: '''simple docstring''' A__ = self.get_tokenizer(word_delimiter_token="|" ) tokenizer.add_tokens("|" ) # fmt: off # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ" A__ = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98] # fmt: on A__ = tokenizer.decode(lowercase , output_char_offsets=lowercase , filter_word_delimiter_token=lowercase ) # check Wav2Vec2CTCTokenizerOutput keys for char self.assertEqual(len(outputs.keys() ) , 2 ) self.assertTrue("text" in outputs ) self.assertTrue("char_offsets" in outputs ) self.assertTrue(isinstance(lowercase , lowercase ) ) # check that order of chars is correct and identical for both outputs self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"] , "char" ) ) , outputs.text ) self.assertListEqual( self.get_from_offsets(outputs["char_offsets"] , "char" ) , ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"] ) # check that offsets are actually correct for char # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token, # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98 self.assertListEqual( self.get_from_offsets(outputs["char_offsets"] , "start_offset" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] ) self.assertListEqual( self.get_from_offsets(outputs["char_offsets"] , "end_offset" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] ) def UpperCamelCase ( self ) -> Dict: '''simple docstring''' A__ = self.get_tokenizer(word_delimiter_token="|" ) def check_list_tuples_equal(lowercase , lowercase ): 
self.assertTrue(isinstance(lowercase , lowercase ) ) self.assertTrue(isinstance(outputs_list[0] , lowercase ) ) # transform list to ModelOutput A__ = WavaVecaPhonemeCTCTokenizerOutput( {k: [d[k] for d in outputs_list] for k in outputs_list[0]} ) self.assertListEqual(outputs_batch["text"] , outputs_batch_a["text"] ) def recursive_check(lowercase , lowercase ): if isinstance(lowercase , lowercase ): [recursive_check(lowercase , lowercase ) for la, la in zip(lowercase , lowercase )] self.assertEqual(lowercase , lowercase ) if "char_offsets" in outputs_batch: recursive_check(outputs_batch["char_offsets"] , outputs_batch_a["char_offsets"] ) # fmt: off A__ = [ [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34], [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34], ] # fmt: on # We assume that `decode` works as expected. 
All we will check now is # the output type is correct and the output is identical to `decode` # char A__ = tokenizer.batch_decode(lowercase , output_char_offsets=lowercase ) A__ = [tokenizer.decode(lowercase , output_char_offsets=lowercase ) for ids in sample_ids] check_list_tuples_equal(lowercase , lowercase ) @unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes" ) def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' pass @unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes" ) def UpperCamelCase ( self ) -> Any: '''simple docstring''' pass @unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency" ) def UpperCamelCase ( self ) -> int: '''simple docstring''' pass @unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing" ) def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' pass def UpperCamelCase ( self ) -> Any: '''simple docstring''' A__ = self.get_tokenizers(do_lower_case=lowercase ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): A__ = tokenizer.vocab_size A__ = len(lowercase ) self.assertNotEqual(lowercase , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) A__ = ["aaaaa bbbbbb", "cccccccccdddddddd"] A__ = tokenizer.add_tokens(lowercase ) A__ = tokenizer.vocab_size A__ = len(lowercase ) self.assertNotEqual(lowercase , 0 ) self.assertEqual(lowercase , lowercase ) self.assertEqual(lowercase , len(lowercase ) ) self.assertEqual(lowercase , all_size + len(lowercase ) ) A__ = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=lowercase ) self.assertGreaterEqual(len(lowercase ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) A__ 
= {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"} A__ = tokenizer.add_special_tokens(lowercase ) A__ = tokenizer.vocab_size A__ = len(lowercase ) self.assertNotEqual(lowercase , 0 ) self.assertEqual(lowercase , lowercase ) self.assertEqual(lowercase , len(lowercase ) ) self.assertEqual(lowercase , all_size_a + len(lowercase ) ) A__ = tokenizer.encode( ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=lowercase ) self.assertGreaterEqual(len(lowercase ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." ) def UpperCamelCase ( self ) -> int: '''simple docstring''' pass @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." ) def UpperCamelCase ( self ) -> Optional[int]: '''simple docstring''' pass def UpperCamelCase ( self ) -> List[str]: '''simple docstring''' A__ = self.get_tokenizers(fast=lowercase , do_lower_case=lowercase ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): A__ = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"] A__ = tokenizer.convert_tokens_to_string(lowercase ) self.assertIsInstance(output["text"] , lowercase )
68
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL

lowerCAmelCase__ = logging.get_logger(__name__)


class a__(BaseImageProcessor):
    r"""
    BLIP image processor: optionally resizes images to a fixed ``(height, width)``,
    rescales pixel values, normalizes with the CLIP mean/std, and converts inputs
    to RGB before batching them as ``pixel_values``.

    NOTE(review): the original declared ``class a__(snake_case)`` with ``snake_case``
    undefined, gave every parameter the same name ``lowercase`` (a SyntaxError), and
    named all four methods ``UpperCamelCase`` so that ``preprocess``'s calls to
    ``self.resize`` / ``self.rescale`` / ``self.normalize`` could not resolve.
    Names below are restored from the imports and call sites.
    """

    # Name of the tensor this processor produces for the model.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        """Store the default preprocessing configuration.

        Args:
            do_resize: Whether to resize to ``size`` by default.
            size: Target size dict; defaults to ``{"height": 384, "width": 384}``.
            resample: PIL resampling filter used when resizing.
            do_rescale: Whether to multiply pixel values by ``rescale_factor``.
            rescale_factor: Scale applied to pixel values (1/255 maps uint8 to [0, 1]).
            do_normalize: Whether to normalize with ``image_mean`` / ``image_std``.
            image_mean / image_std: Per-channel stats; default to the CLIP constants.
            do_convert_rgb: Whether to convert (e.g. RGBA) inputs to RGB first.
        """
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        # assumes default_to_square=True here, matching the fixed square default —
        # the original's mangled argument obscured the intended value; TODO confirm.
        size = get_size_dict(size, default_to_square=True)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image`` to exactly ``(size["height"], size["width"])``.

        Raises:
            ValueError: if ``size`` lacks the ``height`` or ``width`` key.
        """
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply every pixel of ``image`` by ``scale``."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize ``image`` channel-wise: ``(image - mean) / std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        do_convert_rgb: Optional[bool] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Run the full pipeline (convert→resize→rescale→normalize) on one or
        more images and return a :class:`BatchFeature` with ``pixel_values``.

        Every ``None`` argument falls back to the value stored at construction
        time, so callers can override any single step per call.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=True)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # BUG FIX: the original condition `do_resize and size is None or resample is None`
        # parsed as `(do_resize and size is None) or (resample is None)`, raising whenever
        # resample was None even with do_resize disabled, and never catching a missing
        # resample when do_resize was enabled with a size set.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
68
1
from __future__ import annotations

from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any


@dataclass
class Node:
    """A binary-tree node holding an integer payload and two child links.

    NOTE(review): the original collapsed all three fields to
    ``__lowerCamelCase = 42`` and named every function ``lowerCAmelCase__``
    while the bodies called ``preorder``/``inorder``/``height``/... — the
    real names are restored from those call sites.
    """

    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    """Build the fixed demo tree: 1 with children (2 -> (4, 5)) and 3.

    The original created the five nodes but never linked them; the links
    are restored here so the traversals below have something to walk.
    """
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    """Root, then left subtree, then right subtree."""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    """Left subtree, then right subtree, then root."""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    """Left subtree, then root, then right subtree."""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    """Number of nodes on the longest root-to-leaf path (0 for an empty tree)."""
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    """Breadth-first traversal of the payloads, one level at a time."""
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    """Payloads of the nodes at ``level`` (1-based), left to right."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    """Payloads of the nodes at ``level`` (1-based), right to left."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """Per-level payload lists, alternating left-to-right then right-to-left."""
    if root is None:
        return []
    output: list[Any] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    """Demonstrate every traversal on the demo tree."""
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
68
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from requests.exceptions import HTTPError

from transformers.utils import (
    CONFIG_NAME,
    FLAX_WEIGHTS_NAME,
    TF2_WEIGHTS_NAME,
    TRANSFORMERS_CACHE,
    WEIGHTS_NAME,
    cached_file,
    get_file_from_repo,
    has_file,
)

# NOTE(review): identifier mangling — three distinct module constants (repo id,
# cache dir, commit hash, judging by the values) were all renamed to
# `lowerCAmelCase__`, so only the last assignment survives, and the test bodies
# below reference a bare name `lowercase` that is defined nowhere in this file.
# The code is preserved as-is; only comments/docstrings/annotations were added.
lowerCAmelCase__ = """hf-internal-testing/tiny-random-bert"""
lowerCAmelCase__ = os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""")
lowerCAmelCase__ = """9b8c223d42b2188cb49d29af482996f9d0f3e5a6"""


class a__ ( unittest.TestCase ):
    """Tests for the hub file-caching helpers (cached_file / has_file /
    get_file_from_repo). Requires network access to the HF hub."""

    def UpperCamelCase ( self ) -> None:
        """Downloads a file, checks the cache layout (blobs/refs/snapshots),
        and verifies that a second fetch and a short-revision fetch hit the
        same cached path."""
        A__ = cached_file(lowercase , lowercase )
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(lowercase ) )
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(lowercase , lowercase ) ) )
        with open(os.path.join(lowercase , "refs" , "main" ) ) as f:
            A__ = f.read()
        self.assertEqual(lowercase , os.path.join(lowercase , "snapshots" , lowercase , lowercase ) )
        self.assertTrue(os.path.isfile(lowercase ) )
        # File is cached at the same place the second time.
        A__ = cached_file(lowercase , lowercase )
        self.assertEqual(lowercase , lowercase )
        # Using a specific revision to test the full commit hash.
        A__ = cached_file(lowercase , lowercase , revision="9b8c223" )
        self.assertEqual(lowercase , os.path.join(lowercase , "snapshots" , lowercase , lowercase ) )

    def UpperCamelCase ( self ) -> None:
        """Invalid repo id, invalid revision, and missing filename must all
        raise with their specific error messages."""
        with self.assertRaisesRegex(lowercase , "is not a valid model identifier" ):
            A__ = cached_file("tiny-random-bert" , lowercase )
        with self.assertRaisesRegex(lowercase , "is not a valid git identifier" ):
            A__ = cached_file(lowercase , lowercase , revision="aaaa" )
        with self.assertRaisesRegex(lowercase , "does not appear to have a file named" ):
            A__ = cached_file(lowercase , "conf" )

    def UpperCamelCase ( self ) -> None:
        """Missing entries: with the `_raise_exceptions_for_*` escape hatches
        cached_file returns None instead of raising, including under a mocked
        HTTP 500 (connection-error path)."""
        with self.assertRaisesRegex(lowercase , "does not appear to have a file named" ):
            A__ = cached_file(lowercase , "conf" )
        with open(os.path.join(lowercase , "refs" , "main" ) ) as f:
            A__ = f.read()
        # A negative lookup is recorded under `.no_exist` so it can be skipped offline.
        self.assertTrue(os.path.isfile(os.path.join(lowercase , ".no_exist" , lowercase , "conf" ) ) )
        A__ = cached_file(lowercase , "conf" , _raise_exceptions_for_missing_entries=lowercase )
        self.assertIsNone(lowercase )
        A__ = cached_file(lowercase , "conf" , local_files_only=lowercase , _raise_exceptions_for_missing_entries=lowercase )
        self.assertIsNone(lowercase )
        # Build a fake response object carrying a 500 status.
        A__ = mock.Mock()
        A__ = 500
        A__ = {}
        A__ = HTTPError
        A__ = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request" , return_value=lowercase ) as mock_head:
            A__ = cached_file(lowercase , "conf" , _raise_exceptions_for_connection_errors=lowercase )
            self.assertIsNone(lowercase )
            # This check we did call the fake head request
            mock_head.assert_called()

    def UpperCamelCase ( self ) -> None:
        """has_file should report which weight formats exist in a PT-only repo."""
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only" , lowercase ) )
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , lowercase ) )
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , lowercase ) )

    def UpperCamelCase ( self ) -> None:
        """get_file_from_repo: None for a missing file, errors for bad repo or
        revision, and a loadable config file on success."""
        self.assertIsNone(get_file_from_repo("bert-base-cased" , "ahah.txt" ) )
        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(lowercase , "is not a valid model identifier" ):
            get_file_from_repo("bert-base-case" , lowercase )
        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(lowercase , "is not a valid git identifier" ):
            get_file_from_repo("bert-base-cased" , lowercase , revision="ahaha" )
        A__ = get_file_from_repo("bert-base-cased" , lowercase )
        # The name is the cached name which is not very easy to test, so instead we load the content.
        A__ = json.loads(open(lowercase , "r" ).read() )
        self.assertEqual(config["hidden_size"] , 768 )

    def UpperCamelCase ( self ) -> None:
        """get_file_from_repo also works on a plain local directory."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            A__ = Path(lowercase ) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(lowercase , "a.txt" ) , str(lowercase ) )
            self.assertIsNone(get_file_from_repo(lowercase , "b.txt" ) )
68
1
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List

import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging

# NOTE(review): this whole script carries systematic identifier mangling:
# duplicate parameter names (`SCREAMING_SNAKE_CASE_`, `lowercase`) make several
# defs SyntaxErrors, dataclass fields collapsed to `__lowerCamelCase = 42`, and
# bodies reference the pre-mangling names (`m`, `x`, `has_not_submodules`,
# `parser`, `args`, `convert_weight_and_push`, ...) which are now undefined.
# Code is preserved byte-for-byte; only comments/docstrings were added.
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger()


@dataclass
class a__ :
    """Forward-hook tracer: runs a module once and records every leaf
    submodule (Conv/BatchNorm or modules with no children) in `traced`."""

    __lowerCamelCase = 42
    __lowerCamelCase = field(default_factory=snake_case )
    __lowerCamelCase = field(default_factory=snake_case )

    def UpperCamelCase ( self , lowercase , lowercase , lowercase ) -> None:
        """Hook body: append leaf modules to the trace.
        NOTE(review): `m` and `has_not_submodules` are undefined here —
        presumably both were a parameter of this hook before mangling."""
        A__ = len(list(m.modules() ) ) == 1 or isinstance(lowercase , nn.Convad ) or isinstance(lowercase , nn.BatchNormad )
        if has_not_submodules:
            self.traced.append(lowercase )

    def __call__( self , lowercase ) -> "a__":
        """Register the hook on every submodule, run one forward pass to
        populate the trace, then remove all hooks and return self."""
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(lowercase )
        [x.remove() for x in self.handles]
        return self

    @property
    def UpperCamelCase ( self ) -> list:
        """Traced modules that actually carry parameters.
        NOTE(review): the lambda references `x`, which is undefined — the
        lambda's parameter was presumably named `x` before mangling."""
        return list(filter(lambda lowercase : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )


@dataclass
class a__ :
    """Copies state dicts from a source module's traced parameterized ops to
    a destination module's, op by op, after filtering skip-listed types."""

    __lowerCamelCase = 42
    __lowerCamelCase = 42
    __lowerCamelCase = 0
    __lowerCamelCase = field(default_factory=snake_case )
    __lowerCamelCase = field(default_factory=snake_case )

    def __call__( self , lowercase ) -> None:
        """Trace both modules with the same input, pair up their
        parameterized ops positionally, and copy src -> dest weights.
        Raises if the two traces have different lengths."""
        A__ = Tracker(self.dest )(lowercase ).parametrized
        A__ = Tracker(self.src )(lowercase ).parametrized
        A__ = list(filter(lambda lowercase : type(lowercase ) not in self.src_skip , lowercase ) )
        A__ = list(filter(lambda lowercase : type(lowercase ) not in self.dest_skip , lowercase ) )
        if len(lowercase ) != len(lowercase ):
            raise Exception(
                F'Numbers of operations are different. Source module has {len(lowercase )} operations while'
                F' destination module has {len(lowercase )}.' )
        for dest_m, src_m in zip(lowercase , lowercase ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(F'Transfered from={src_m} to={dest_m}' )


def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str , SCREAMING_SNAKE_CASE_: ResNetConfig , SCREAMING_SNAKE_CASE_: Path , SCREAMING_SNAKE_CASE_: bool = True ) -> None:
    """Convert one timm ResNet checkpoint to an HF ResNetForImageClassification,
    verify the logits match on a random input, and optionally push the model
    and an image processor to the hub.
    NOTE(review): duplicate parameter names are a SyntaxError; the body's
    `from_model`/`our_model`/`name`/`push_to_hub`/`save_directory`/
    `checkpoint_name` were parameter/local names before mangling."""
    print(F'Converting {name}...' )
    with torch.no_grad():
        A__ = timm.create_model(SCREAMING_SNAKE_CASE_ , pretrained=SCREAMING_SNAKE_CASE_ ).eval()
        A__ = ResNetForImageClassification(SCREAMING_SNAKE_CASE_ ).eval()
        A__ = ModuleTransfer(src=SCREAMING_SNAKE_CASE_ , dest=SCREAMING_SNAKE_CASE_ )
        A__ = torch.randn((1, 3, 2_2_4, 2_2_4) )
        module_transfer(SCREAMING_SNAKE_CASE_ )
    assert torch.allclose(from_model(SCREAMING_SNAKE_CASE_ ) , our_model(SCREAMING_SNAKE_CASE_ ).logits ), "The model logits don't match the original one."
    A__ = F'resnet{"-".join(name.split("resnet" ) )}'
    print(SCREAMING_SNAKE_CASE_ )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message="Add model" , use_temp_dir=SCREAMING_SNAKE_CASE_ , )
        # we can use the convnext one
        A__ = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message="Add image processor" , use_temp_dir=SCREAMING_SNAKE_CASE_ , )
        print(F'Pushed {checkpoint_name}' )


def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Path , SCREAMING_SNAKE_CASE_: str = None , SCREAMING_SNAKE_CASE_: bool = True ) -> tuple:
    """Build the ImageNet label maps, define the supported resnet configs, and
    convert either the named checkpoint or all of them.
    NOTE(review): duplicate parameter names are a SyntaxError; `idalabel`,
    `num_labels`, `model_name`, `ImageNetPreTrainedConfig`,
    `convert_weight_and_push`, `names_to_config`, `config`, `expected_shape`
    are pre-mangling names that this body still references."""
    A__ = "imagenet-1k-id2label.json"
    A__ = 1_0_0_0
    A__ = (1, num_labels)
    A__ = "huggingface/label-files"
    A__ = num_labels
    A__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) )
    A__ = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
    A__ = idalabel
    A__ = {v: k for k, v in idalabel.items()}
    A__ = partial(SCREAMING_SNAKE_CASE_ , num_labels=SCREAMING_SNAKE_CASE_ , idalabel=SCREAMING_SNAKE_CASE_ , labelaid=SCREAMING_SNAKE_CASE_ )
    A__ = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type="basic" ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="bottleneck" ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type="basic" ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="bottleneck" ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 2_3, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="bottleneck" ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 3_6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="bottleneck" ),
    }
    if model_name:
        convert_weight_and_push(SCREAMING_SNAKE_CASE_ , names_to_config[model_name] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    return config, expected_shape


if __name__ == "__main__":
    # NOTE(review): `parser` / `args` below are pre-mangling names for the
    # values assigned to `lowerCAmelCase__` above them.
    lowerCAmelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default=None,
        type=str,
        help=(
            """The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
            """ currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default=None,
        type=Path,
        required=True,
        help="""Path to the output PyTorch model directory.""",
    )
    parser.add_argument(
        """--push_to_hub""",
        default=True,
        type=bool,
        required=False,
        help="""If True, push model and image processor to the hub.""",
    )
    lowerCAmelCase__ = parser.parse_args()
    lowerCAmelCase__ = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
68
import gc
import unittest

import torch
from parameterized import parameterized

from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin

# NOTE(review): identifier mangling throughout this module — class attributes
# and methods collapsed to `__lowerCamelCase` / `UpperCamelCase` (later defs
# shadow earlier ones) and bodies reference a bare `lowercase` plus the
# pre-mangling local names (`batch_size`, `model`, `sample`, ...), which are
# undefined. Code preserved byte-for-byte; only comments/docstrings added.
enable_full_determinism()


class a__ ( snake_case , snake_case , unittest.TestCase ):
    """Unit tests for AutoencoderKL (the SD VAE). The mangled bases were
    presumably ModelTesterMixin and UNetTesterMixin (imported above)."""

    __lowerCamelCase = AutoencoderKL
    __lowerCamelCase = 'sample'
    __lowerCamelCase = 1e-2

    @property
    def UpperCamelCase ( self ) -> dict:
        """Random (4, 3, 32, 32) sample batch used as dummy model input."""
        A__ = 4
        A__ = 3
        A__ = (32, 32)
        A__ = floats_tensor((batch_size, num_channels) + sizes ).to(lowercase )
        return {"sample": image}

    @property
    def UpperCamelCase ( self ) -> tuple:
        """Expected input shape (C, H, W)."""
        return (3, 32, 32)

    @property
    def UpperCamelCase ( self ) -> tuple:
        """Expected output shape (C, H, W)."""
        return (3, 32, 32)

    def UpperCamelCase ( self ) -> tuple:
        """Small two-block VAE config plus the dummy input."""
        A__ = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        A__ = self.dummy_input
        return init_dict, inputs_dict

    def UpperCamelCase ( self ) -> None:
        '''intentionally skipped'''
        pass

    def UpperCamelCase ( self ) -> None:
        '''intentionally skipped'''
        pass

    @unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
    def UpperCamelCase ( self ) -> None:
        """Gradient checkpointing must not change the loss or the parameter
        gradients (within tolerance) versus the plain forward/backward."""
        A__ , A__ = self.prepare_init_args_and_inputs_for_common()
        A__ = self.model_class(**lowercase )
        model.to(lowercase )
        assert not model.is_gradient_checkpointing and model.training
        A__ = model(**lowercase ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()
        A__ = torch.randn_like(lowercase )
        A__ = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        A__ = self.model_class(**lowercase )
        # clone model
        model_a.load_state_dict(model.state_dict() )
        model_a.to(lowercase )
        model_a.enable_gradient_checkpointing()
        assert model_a.is_gradient_checkpointing and model_a.training
        A__ = model_a(**lowercase ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_a.zero_grad()
        A__ = (out_a - labels).mean()
        loss_a.backward()
        # compare the output and parameters gradients
        self.assertTrue((loss - loss_a).abs() < 1e-5 )
        A__ = dict(model.named_parameters() )
        A__ = dict(model_a.named_parameters() )
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) )

    def UpperCamelCase ( self ) -> None:
        """from_pretrained on the dummy VAE loads with no missing keys and
        produces a non-None output."""
        A__ , A__ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=lowercase )
        self.assertIsNotNone(lowercase )
        self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
        model.to(lowercase )
        A__ = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"

    def UpperCamelCase ( self ) -> None:
        """Seeded forward of the dummy VAE must reproduce the recorded
        per-device output slices."""
        A__ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
        A__ = model.to(lowercase )
        model.eval()
        if torch_device == "mps":
            A__ = torch.manual_seed(0 )
        else:
            A__ = torch.Generator(device=lowercase ).manual_seed(0 )
        A__ = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        A__ = image.to(lowercase )
        with torch.no_grad():
            A__ = model(lowercase , sample_posterior=lowercase , generator=lowercase ).sample
        A__ = output[0, -1, -3:, -3:].flatten().cpu()
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            A__ = torch.tensor(
                [
                    -4.00_78e-01,
                    -3.83_23e-04,
                    -1.26_81e-01,
                    -1.14_62e-01,
                    2.00_95e-01,
                    1.08_93e-01,
                    -8.82_47e-02,
                    -3.03_61e-01,
                    -9.86_44e-03,
                ] )
        elif torch_device == "cpu":
            A__ = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
        else:
            A__ = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
        self.assertTrue(torch_all_close(lowercase , lowercase , rtol=1e-2 ) )


@slow
class a__ ( unittest.TestCase ):
    """Slow integration tests against the real SD v1-4 VAE checkpoint and
    recorded output slices (require network, and GPU for the fp16 paths)."""

    def UpperCamelCase ( self , lowercase , lowercase ) -> str:
        """Filename of the stored gaussian-noise fixture for (seed, shape)."""
        return F'gaussian_noise_s={seed}_shape={"_".join([str(lowercase ) for s in shape] )}.npy'

    def UpperCamelCase ( self ) -> None:
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCamelCase ( self , lowercase=0 , lowercase=(4, 3, 512, 512) , lowercase=False ) -> torch.Tensor:
        """Load the stored noise fixture as a tensor on the test device.
        NOTE(review): `fpaa` is presumably a mangled `fp16` flag and
        `torch.floataa` mangled float16/float32 dtypes — verify upstream."""
        A__ = torch.floataa if fpaa else torch.floataa
        A__ = torch.from_numpy(load_hf_numpy(self.get_file_format(lowercase , lowercase ) ) ).to(lowercase ).to(lowercase )
        return image

    def UpperCamelCase ( self , lowercase="CompVis/stable-diffusion-v1-4" , lowercase=False ) -> AutoencoderKL:
        """Load the SD VAE subfolder checkpoint, optionally in fp16."""
        A__ = "fp16" if fpaa else None
        A__ = torch.floataa if fpaa else torch.floataa
        A__ = AutoencoderKL.from_pretrained(
            lowercase , subfolder="vae" , torch_dtype=lowercase , revision=lowercase , )
        model.to(lowercase ).eval()
        return model

    def UpperCamelCase ( self , lowercase=0 ) -> torch.Generator:
        """Device-appropriate seeded generator (MPS needs the global seed)."""
        if torch_device == "mps":
            return torch.manual_seed(lowercase )
        return torch.Generator(device=lowercase ).manual_seed(lowercase )

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ] )
    def UpperCamelCase ( self , lowercase , lowercase , lowercase ) -> None:
        """Stochastic encode/decode round-trip matches recorded slices."""
        A__ = self.get_sd_vae_model()
        A__ = self.get_sd_image(lowercase )
        A__ = self.get_generator(lowercase )
        with torch.no_grad():
            A__ = model(lowercase , generator=lowercase , sample_posterior=lowercase ).sample
        assert sample.shape == image.shape
        A__ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        A__ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
        assert torch_all_close(lowercase , lowercase , atol=3e-3 )

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ] )
    @require_torch_gpu
    def UpperCamelCase ( self , lowercase , lowercase ) -> None:
        """Same round-trip in (presumably) fp16 on GPU, looser tolerance."""
        A__ = self.get_sd_vae_model(fpaa=lowercase )
        A__ = self.get_sd_image(lowercase , fpaa=lowercase )
        A__ = self.get_generator(lowercase )
        with torch.no_grad():
            A__ = model(lowercase , generator=lowercase , sample_posterior=lowercase ).sample
        assert sample.shape == image.shape
        A__ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        A__ = torch.tensor(lowercase )
        assert torch_all_close(lowercase , lowercase , atol=1e-2 )

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ] )
    def UpperCamelCase ( self , lowercase , lowercase , lowercase ) -> None:
        """Deterministic (mode) round-trip matches recorded slices."""
        A__ = self.get_sd_vae_model()
        A__ = self.get_sd_image(lowercase )
        with torch.no_grad():
            A__ = model(lowercase ).sample
        assert sample.shape == image.shape
        A__ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        A__ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
        assert torch_all_close(lowercase , lowercase , atol=3e-3 )

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ] )
    @require_torch_gpu
    def UpperCamelCase ( self , lowercase , lowercase ) -> None:
        """decode() from a (3, 4, 64, 64) latent yields (3, 3, 512, 512)
        matching the recorded slices (8x spatial upsampling)."""
        A__ = self.get_sd_vae_model()
        A__ = self.get_sd_image(lowercase , shape=(3, 4, 64, 64) )
        with torch.no_grad():
            A__ = model.decode(lowercase ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        A__ = sample[-1, -2:, :2, -2:].flatten().cpu()
        A__ = torch.tensor(lowercase )
        assert torch_all_close(lowercase , lowercase , atol=1e-3 )

    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ] )
    @require_torch_gpu
    def UpperCamelCase ( self , lowercase , lowercase ) -> None:
        """decode() in (presumably) fp16, looser tolerance."""
        A__ = self.get_sd_vae_model(fpaa=lowercase )
        A__ = self.get_sd_image(lowercase , shape=(3, 4, 64, 64) , fpaa=lowercase )
        with torch.no_grad():
            A__ = model.decode(lowercase ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        A__ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        A__ = torch.tensor(lowercase )
        assert torch_all_close(lowercase , lowercase , atol=5e-3 )

    @parameterized.expand([(13,), (16,), (27,)] )
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
    def UpperCamelCase ( self , lowercase ) -> None:
        """xformers attention must match the default attention on decode
        in (presumably) fp16."""
        A__ = self.get_sd_vae_model(fpaa=lowercase )
        A__ = self.get_sd_image(lowercase , shape=(3, 4, 64, 64) , fpaa=lowercase )
        with torch.no_grad():
            A__ = model.decode(lowercase ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            A__ = model.decode(lowercase ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(lowercase , lowercase , atol=1e-1 )

    @parameterized.expand([(13,), (16,), (37,)] )
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
    def UpperCamelCase ( self , lowercase ) -> None:
        """xformers attention must match the default attention on decode
        in full precision."""
        A__ = self.get_sd_vae_model()
        A__ = self.get_sd_image(lowercase , shape=(3, 4, 64, 64) )
        with torch.no_grad():
            A__ = model.decode(lowercase ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            A__ = model.decode(lowercase ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(lowercase , lowercase , atol=1e-2 )

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ] )
    def UpperCamelCase ( self , lowercase , lowercase ) -> None:
        """encode() produces a latent of 1/8 spatial size whose sampled
        slice matches the recorded values."""
        A__ = self.get_sd_vae_model()
        A__ = self.get_sd_image(lowercase )
        A__ = self.get_generator(lowercase )
        with torch.no_grad():
            A__ = model.encode(lowercase ).latent_dist
            A__ = dist.sample(generator=lowercase )
        assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        A__ = sample[0, -1, -3:, -3:].flatten().cpu()
        A__ = torch.tensor(lowercase )
        A__ = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(lowercase , lowercase , atol=lowercase )
68
1
import enum
import os
from hashlib import sha256  # was garbled as "shaaaa" — sha256 is the hash actually used below
from typing import Optional

from .. import config
from .logging import get_logger

logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    """How thoroughly downloaded data should be verified against expected metadata."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Base error raised when checksum verification fails."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """A downloaded file was not listed in the expected checksums."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files listed in the expected checksums were never downloaded."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """A downloaded file's checksum does not match the expected one."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    """Compare recorded download checksums against the expected ones.

    Args:
        expected_checksums: mapping url -> expected checksum info, or None to skip verification.
        recorded_checksums: mapping url -> checksum info actually recorded during download.
        verification_name: optional label inserted into error/log messages.

    Raises:
        ExpectedMoreDownloadedFiles: some expected urls were not downloaded.
        UnexpectedDownloadedFile: urls were downloaded that were not expected.
        NonMatchingChecksumError: at least one url's checksum differs.
    """
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Base error raised when split-size verification fails."""


class UnexpectedSplits(SplitsVerificationException):
    """Splits were recorded that were not listed in the expected splits."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some expected splits were not recorded."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """A split's recorded number of examples differs from the expected one."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    """Compare recorded split sizes against the expected split metadata.

    Raises:
        ExpectedMoreSplits / UnexpectedSplits: split-name sets differ.
        NonMatchingSplitsSizesError: a split's num_examples differs.
    """
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Return the file size and (optionally) the SHA-256 checksum of the file at *path*.

    The file is hashed incrementally in 1 MiB chunks so large files are not
    loaded into memory at once.
    """
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size: Optional[int]) -> bool:
    """Return True iff *dataset_size* is known and below config.IN_MEMORY_MAX_SIZE."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
68
import logging
import os
from typing import List, TextIO, Union

from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask

logger = logging.getLogger(__name__)


class NER(TokenClassificationTask):
    """Token-classification task reading CoNLL-style NER files (one token per line)."""

    def __init__(self, label_idx=-1):
        # In NER datasets the label is conventionally the last column.
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        """Read `{mode}.txt` from *data_dir* and return one InputExample per sentence.

        Sentences are separated by blank lines or "-DOCSTART-" markers; tokens
        missing a label column (e.g. in test mode) default to "O".
        """
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        """Copy the test file to *writer*, replacing each token's label with its prediction."""
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                # Predictions ran out for this sentence: the input was truncated at max_seq_length.
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        """Return the label set, from *path* if given, otherwise the CoNLL-2003 NER defaults."""
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]


class Chunk(NER):
    """Chunking task: same file format as NER, but the label lives in the second-to-last column."""

    def __init__(self):
        # In CoNLL-2003 the chunk tag is the second-to-last column.
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        """Return the label set, from *path* if given, otherwise the CoNLL-2000 chunk defaults."""
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]


class POS(TokenClassificationTask):
    """Part-of-speech tagging task reading CoNLL-U files via the `conllu` streaming parser."""

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        """Read `{mode}.txt` (CoNLL-U format) and return one InputExample per sentence."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        """Write each sentence as `form (upos|prediction)` tuples, one sentence per line."""
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        """Return the label set, from *path* if given, otherwise the 17 universal POS tags."""
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
68
1
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , ) @pytest.mark.usefixtures('sm_env' ) @parameterized_class( [ { 'framework': 'pytorch', 'script': 'run_glue.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 650, 'eval_accuracy': 0.7, 'eval_loss': 0.6}, }, { 'framework': 'pytorch', 'script': 'run_ddp.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 600, 'eval_accuracy': 0.7, 'eval_loss': 0.6}, }, { 'framework': 'tensorflow', 'script': 'run_tf_dist.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 600, 'eval_accuracy': 0.6, 'eval_loss': 0.7}, }, ] ) class a__ ( unittest.TestCase ): """simple docstring""" def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' if self.framework == "pytorch": subprocess.run( F'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding="utf-8" , check=lowercase , ) assert hasattr(self , "env" ) def UpperCamelCase ( self , lowercase ) -> Tuple: '''simple docstring''' A__ = F'{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}' # distributed data settings A__ = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , 
base_job_name=lowercase , instance_count=lowercase , instance_type=self.instance_type , debugger_hook_config=lowercase , hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=lowercase , py_version="py36" , ) def UpperCamelCase ( self , lowercase ) -> str: '''simple docstring''' TrainingJobAnalytics(lowercase ).export_csv(F'{self.env.test_path}/{job_name}_metrics.csv' ) @parameterized.expand([(2,)] ) def UpperCamelCase ( self , lowercase ) -> Union[str, Any]: '''simple docstring''' A__ = self.create_estimator(lowercase ) # run training estimator.fit() # result dataframe A__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis A__ = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] ) A__ = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping A__ = ( Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 999999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy ) assert all(t <= self.results["eval_loss"] for t in eval_loss ) # dump tests result into json file to share in PR with open(F'{estimator.latest_training_job.name}.json' , "w" ) as outfile: json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , lowercase )
68
import random class a__ : """simple docstring""" @staticmethod def UpperCamelCase ( lowercase ) -> tuple[list[int], list[int]]: '''simple docstring''' A__ = [ord(lowercase ) for i in text] A__ = [] A__ = [] for i in plain: A__ = random.randint(1 , 300 ) A__ = (i + k) * k cipher.append(lowercase ) key.append(lowercase ) return cipher, key @staticmethod def UpperCamelCase ( lowercase , lowercase ) -> str: '''simple docstring''' A__ = [] for i in range(len(lowercase ) ): A__ = int((cipher[i] - (key[i]) ** 2) / key[i] ) plain.append(chr(lowercase ) ) return "".join(lowercase ) if __name__ == "__main__": lowerCAmelCase__ , lowerCAmelCase__ = Onepad().encrypt("""Hello""") print(c, k) print(Onepad().decrypt(c, k))
68
1
from typing import Dict from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, get_torch_dist_unique_port, require_torch_multi_gpu, require_torch_neuroncore, ) from transformers.training_args import ParallelMode from transformers.utils import logging lowerCAmelCase__ = logging.get_logger(__name__) if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset from transformers import Trainer class a__ ( snake_case ): """simple docstring""" def __init__( self , lowercase = 101 ) -> Any: '''simple docstring''' A__ = length def __len__( self ) -> str: '''simple docstring''' return self.length def __getitem__( self , lowercase ) -> int: '''simple docstring''' return i class a__ : """simple docstring""" def __call__( self , lowercase ) -> Any: '''simple docstring''' return {"input_ids": torch.tensor(lowercase ), "labels": torch.tensor(lowercase )} class a__ ( nn.Module ): """simple docstring""" def __init__( self ) -> Tuple: '''simple docstring''' super().__init__() # Add some (unused) params otherwise DDP will complain. 
A__ = nn.Linear(120 , 80 ) def UpperCamelCase ( self , lowercase , lowercase=None ) -> List[str]: '''simple docstring''' if labels is not None: return torch.tensor(0.0 , device=input_ids.device ), input_ids else: return input_ids class a__ ( snake_case ): """simple docstring""" @require_torch_neuroncore def UpperCamelCase ( self ) -> Any: '''simple docstring''' A__ = F'--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split() A__ = self.get_auto_remove_tmp_dir() A__ = F'--output_dir {output_dir}'.split() A__ = ["torchrun"] + distributed_args + args execute_subprocess_async(lowercase , env=self.get_env() ) # successful return here == success - any errors would have caused an error in the sub-call class a__ ( snake_case ): """simple docstring""" @require_torch_multi_gpu def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' A__ = F'--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split() A__ = self.get_auto_remove_tmp_dir() A__ = F'--output_dir {output_dir}'.split() A__ = ["torchrun"] + distributed_args + args execute_subprocess_async(lowercase , env=self.get_env() ) # successful return here == success - any errors would have caused an error in the sub-call if __name__ == "__main__": # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs: # # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py lowerCAmelCase__ = HfArgumentParser((TrainingArguments,)) lowerCAmelCase__ = parser.parse_args_into_dataclasses()[0] logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """ f"""distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}""" ) # Essentially, what we want to verify in the 
distributed case is that we get all samples back, # in the right order. (this is crucial for prediction for instance) for dataset_length in [1_0_1, 4_0, 7]: lowerCAmelCase__ = DummyDataset(dataset_length) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: EvalPrediction ) -> Dict: '''simple docstring''' A__ = list(range(len(SCREAMING_SNAKE_CASE_ ) ) ) A__ = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential if not success and training_args.local_rank == 0: logger.warning( "Predictions and/or labels do not match expected results:\n - predictions: " F'{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}' ) return {"success": success} lowerCAmelCase__ = Trainer( model=DummyModel(), args=training_args, data_collator=DummyDataCollator(), eval_dataset=dataset, compute_metrics=compute_metrics, ) lowerCAmelCase__ = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) lowerCAmelCase__ = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) lowerCAmelCase__ = 2 lowerCAmelCase__ = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) lowerCAmelCase__ = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) lowerCAmelCase__ = None
68
def lowerCAmelCase__ ( ) -> Any: '''simple docstring''' for n in range(1 , 1_0_0_0_0_0_0 ): yield n * (n + 1) // 2 def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Tuple ) -> Any: '''simple docstring''' A__ = 1 A__ = 2 while i * i <= n: A__ = 0 while n % i == 0: n //= i multiplicity += 1 divisors_count *= multiplicity + 1 i += 1 if n > 1: divisors_count *= 2 return divisors_count def lowerCAmelCase__ ( ) -> Dict: '''simple docstring''' return next(i for i in triangle_number_generator() if count_divisors(SCREAMING_SNAKE_CASE_ ) > 5_0_0 ) if __name__ == "__main__": print(solution())
68
1
from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class a__ : """simple docstring""" def __init__( self , lowercase , ) -> Any: '''simple docstring''' A__ = parent A__ = 13 A__ = 7 A__ = 30 A__ = self.seq_length + self.mem_len A__ = 15 A__ = True A__ = True A__ = 99 A__ = [10, 50, 80] A__ = 32 A__ = 32 A__ = 4 A__ = 8 A__ = 128 A__ = 2 A__ = 2 A__ = None A__ = 1 A__ = 0 A__ = 3 A__ = self.vocab_size - 1 A__ = 0.01 def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ = TransfoXLConfig( vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , ) return (config, input_ids_a, input_ids_a, lm_labels) def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' random.seed(self.seed ) tf.random.set_seed(self.seed ) def UpperCamelCase ( self , lowercase , lowercase , lowercase , lowercase ) -> str: '''simple docstring''' A__ = TFTransfoXLModel(lowercase ) A__ , A__ = 
model(lowercase ).to_tuple() A__ = {"input_ids": input_ids_a, "mems": mems_a} A__ , A__ = model(lowercase ).to_tuple() self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def UpperCamelCase ( self , lowercase , lowercase , lowercase , lowercase ) -> List[str]: '''simple docstring''' A__ = TFTransfoXLLMHeadModel(lowercase ) A__ , A__ = model(lowercase ).to_tuple() A__ = {"input_ids": input_ids_a, "labels": lm_labels} A__ , A__ = model(lowercase ).to_tuple() A__ , A__ = model([input_ids_a, mems_a] ).to_tuple() A__ = {"input_ids": input_ids_a, "mems": mems_a, "labels": lm_labels} A__ , A__ = model(lowercase ).to_tuple() self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def UpperCamelCase ( self , lowercase , lowercase , lowercase , lowercase ) -> Dict: '''simple docstring''' A__ = TFTransfoXLForSequenceClassification(lowercase ) A__ = model(lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase ( self ) -> List[str]: '''simple docstring''' A__ = self.prepare_config_and_inputs() ((A__) , (A__) , (A__) , (A__)) = config_and_inputs A__ = {"input_ids": 
input_ids_a} return config, inputs_dict @require_tf class a__ ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" __lowerCamelCase = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) __lowerCamelCase = () if is_tf_available() else () __lowerCamelCase = ( { 'feature-extraction': TFTransfoXLModel, 'text-classification': TFTransfoXLForSequenceClassification, 'text-generation': TFTransfoXLLMHeadModel, 'zero-shot': TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented __lowerCamelCase = False __lowerCamelCase = False __lowerCamelCase = False __lowerCamelCase = False def UpperCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Tuple: '''simple docstring''' if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. 
return True return False def UpperCamelCase ( self ) -> Optional[int]: '''simple docstring''' A__ = TFTransfoXLModelTester(self ) A__ = ConfigTester(self , config_class=lowercase , d_embed=37 ) def UpperCamelCase ( self ) -> Any: '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase ( self ) -> List[str]: '''simple docstring''' self.model_tester.set_seed() A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*lowercase ) def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' self.model_tester.set_seed() A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*lowercase ) def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*lowercase ) def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = [TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: A__ = model_class(lowercase ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class in list_other_models_with_output_ebd: A__ = model.get_output_embeddings() assert isinstance(lowercase , tf.keras.layers.Layer ) A__ = model.get_bias() assert name is None else: A__ = model.get_output_embeddings() assert x is None A__ = model.get_bias() assert name is None def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' pass @slow def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = TFTransfoXLModel.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss." 
) def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' pass @require_tf class a__ ( unittest.TestCase ): """simple docstring""" @unittest.skip("Skip test until #12651 is resolved." ) @slow def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' A__ = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103" ) # fmt: off A__ = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . 
<eod> </s> <eos> # fmt: off A__ = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> A__ = model.generate(lowercase , max_length=200 , do_sample=lowercase ) self.assertListEqual(output_ids[0].numpy().tolist() , lowercase )
68
import io
import json
import unittest

from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu

# Reference translation pairs used to score the WMT19 checkpoints.
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class a__(unittest.TestCase):  # obfuscated class name retained; unittest discovers it by base class
    """Slow integration test: each facebook/wmt19-* checkpoint must reach a minimum BLEU."""

    def get_tokenizer(self, mname):
        """Load the FSMT tokenizer for checkpoint *mname*."""
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        """Load the FSMT model for *mname* on the test device; fp16 on CUDA for speed."""
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            # pair, minimum acceptable BLEU score
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        """Translate the validation sources for *pair* and assert BLEU >= *min_bleu_score*."""
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)
        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]
        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
1
import os
from argparse import ArgumentParser, Namespace

from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand


if not is_tf_available() and not is_torch_available():
    raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")

# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    """Factory used by argparse's `set_defaults(func=...)` to build the command.

    Returns: TrainCommand initialized from the parsed CLI arguments.
    """
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
    """`transformers-cli train` subcommand: fine-tune a model on a CSV dataset."""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register the `train` subcommand and all of its CLI arguments on *parser*."""
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)

    def __init__(self, args: Namespace):
        """Build the pipeline and load the train/validation datasets from *args*."""
        self.logger = logging.get_logger("transformers-cli/training")

        # Prefer TF when available, otherwise fall back to PyTorch.
        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        """Dispatch to the framework-specific training loop."""
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        # PyTorch CLI training is not implemented yet.
        raise NotImplementedError

    def run_tf(self):
        """Fit the TF pipeline on the loaded dataset, then persist it."""
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
68
def hamming(n_element: int) -> list:
    """Return the first *n_element* Hamming numbers.

    Hamming numbers are of the form 2^i * 3^j * 5^k; the series starts
    1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, ...

    Args:
        n_element: how many terms of the series to produce (must be >= 1).

    Returns:
        A sorted list of the first n_element Hamming numbers.

    Raises:
        ValueError: if n_element is less than 1.
    """
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    # i, j, k point at the smallest already-generated number whose
    # multiple by 2 / 3 / 5 respectively has not yet been emitted.
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        # The next Hamming number is the smallest candidate multiple.
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
68
1
import argparse
from typing import List

import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset

# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_fold_dataloaders(
    accelerator: Accelerator,
    dataset: DatasetDict,
    train_idxs: List[int],
    valid_idxs: List[int],
    batch_size: int = 16,
):
    """Build train/validation/test DataLoaders for one cross-validation fold.

    Args:
        accelerator: `Accelerator` object (used for process coordination and
            to decide padding strategy per device/precision).
        dataset: the full GLUE MRPC `DatasetDict`.
        train_idxs / valid_idxs: row indices of this fold's train/val split
            inside the original "train" split.
        batch_size: per-device training batch size.

    Returns:
        (train_dataloader, eval_dataloader, test_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader


def training_function(config, args):
    """Train on GLUE MRPC with stratified k-fold cross validation.

    For each fold a fresh model is trained and its (softmax-able) test logits
    are accumulated; at the end the fold-averaged predictions are scored.
    """
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
            batch_size,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()

    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)


def main():
    """Parse CLI arguments and launch cross-validated training."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
68
import copy
import random

from transformers import CLIPTokenizer


class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """CLIPTokenizer that maps one placeholder token to several learned tokens.

    A placeholder (e.g. "<cat-toy>") can be backed by `num_vec_per_token`
    vocabulary entries ("<cat-toy>_0", "<cat-toy>_1", ...); before tokenizing,
    each placeholder occurrence in the text is expanded to its token list.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # placeholder token -> list of actual vocabulary tokens backing it
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        """Add *placeholder_token* to the vocabulary, failing loudly on collision."""
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        """Register *placeholder_token*, backed by `num_vec_per_token` vocab tokens.

        Raises:
            ValueError: if the token (or a numbered variant) already exists, or if
                an existing placeholder is a substring of the new one (ambiguous
                replacement).
        """
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}keep placeholder tokens independent"
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        """Expand every registered placeholder occurring in *text*.

        Args:
            text: a string or a list of strings.
            vector_shuffle: if True, shuffle the backing tokens before joining
                (shuffles a copy; `self.token_map` is left untouched).
            prop_tokens_to_load: fraction of each placeholder's backing tokens
                to use (always at least one).
        """
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        """Tokenize *text* after expanding placeholder tokens."""
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        """Encode *text* after expanding placeholder tokens."""
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
68
1
from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class a__ : """simple docstring""" __lowerCamelCase = BlenderbotSmallConfig __lowerCamelCase = {} __lowerCamelCase = 'gelu' def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=False , lowercase=99 , lowercase=32 , lowercase=2 , lowercase=4 , lowercase=37 , lowercase=0.1 , lowercase=0.1 , lowercase=20 , lowercase=2 , lowercase=1 , lowercase=0 , ) -> Any: '''simple docstring''' A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_labels A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = eos_token_id A__ = pad_token_id A__ = bos_token_id def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) A__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) A__ = tf.concat([input_ids, eos_tensor] , axis=1 ) A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , 
encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) A__ = prepare_blenderbot_small_inputs_dict(lowercase , lowercase , lowercase ) return config, inputs_dict def UpperCamelCase ( self , lowercase , lowercase ) -> str: '''simple docstring''' A__ = TFBlenderbotSmallModel(config=lowercase ).get_decoder() A__ = inputs_dict["input_ids"] A__ = input_ids[:1, :] A__ = inputs_dict["attention_mask"][:1, :] A__ = inputs_dict["head_mask"] A__ = 1 # first forward pass A__ = model(lowercase , attention_mask=lowercase , head_mask=lowercase , use_cache=lowercase ) A__ , A__ = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids A__ = ids_tensor((self.batch_size, 3) , config.vocab_size ) A__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and A__ = tf.concat([input_ids, next_tokens] , axis=-1 ) A__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) A__ = model(lowercase , attention_mask=lowercase )[0] A__ = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice A__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) A__ = output_from_no_past[:, -3:, random_slice_idx] A__ = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase , lowercase , rtol=1e-3 ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int , SCREAMING_SNAKE_CASE_: List[Any] , SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: Optional[Any]=None , SCREAMING_SNAKE_CASE_: Optional[int]=None , SCREAMING_SNAKE_CASE_: Optional[int]=None , 
SCREAMING_SNAKE_CASE_: Dict=None , SCREAMING_SNAKE_CASE_: List[str]=None , ) -> List[Any]: '''simple docstring''' if attention_mask is None: A__ = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE_ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: A__ = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: A__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: A__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: A__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class a__ ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" __lowerCamelCase = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) __lowerCamelCase = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () __lowerCamelCase = ( { 'conversational': TFBlenderbotSmallForConditionalGeneration, 'feature-extraction': TFBlenderbotSmallModel, 'summarization': TFBlenderbotSmallForConditionalGeneration, 'text2text-generation': TFBlenderbotSmallForConditionalGeneration, 'translation': TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) __lowerCamelCase = True __lowerCamelCase = False __lowerCamelCase = False def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ = TFBlenderbotSmallModelTester(self ) A__ = ConfigTester(self , config_class=lowercase ) def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' self.config_tester.run_common_tests() 
def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase ) @require_tokenizers @require_tf class a__ ( unittest.TestCase ): """simple docstring""" __lowerCamelCase = [ 'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like ' ' i\'m going to throw up.\nand why is that?' ] __lowerCamelCase = 'facebook/blenderbot_small-90M' @cached_property def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" ) @cached_property def UpperCamelCase ( self ) -> Any: '''simple docstring''' A__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def UpperCamelCase ( self ) -> int: '''simple docstring''' A__ = self.tokenizer(self.src_text , return_tensors="tf" ) A__ = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowercase , ) A__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowercase )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
68
from collections import deque
from math import floor
from random import random
from time import time


class DirectedGraph:
    """Adjacency-list directed graph; edges stored as [weight, destination]."""

    def __init__(self):
        # maps vertex -> list of [w, v] outgoing edges
        self.graph = {}

    def add_pair(self, u, v, w=1):
        """Add edge u -> v with weight w (duplicates ignored); ensure v exists."""
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        """Return every vertex in insertion order."""
        return list(self.graph)

    def remove_pair(self, u, v):
        """Remove the edge u -> v if present."""
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)

    def dfs(self, s=-2, d=-1):
        """Iterative DFS from s (default: first vertex); stop early if d is reached.

        Returns the list of visited vertices in visit order.
        """
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        """Populate the graph with c random vertices (random c if -1)."""
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first traversal from s; returns vertices in visit order."""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        """Number of edges arriving at u."""
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        """Number of edges leaving u."""
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        """DFS-based topological order starting from s (assumes a DAG)."""
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        """Return vertices that participate in some back-edge cycle found by DFS."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """True if the DFS from the first vertex detects a cycle."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        """Wall-clock seconds taken by dfs(s, e)."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Wall-clock seconds taken by bfs(s)."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin


class Graph:
    """Adjacency-list undirected graph; every edge is stored in both directions."""

    def __init__(self):
        self.graph = {}

    def add_pair(self, u, v, w=1):
        """Add undirected edge u <-> v with weight w (duplicates ignored)."""
        # add u -> v
        if self.graph.get(u):
            # if there already is an edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is an edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    def remove_pair(self, u, v):
        """Remove the undirected edge u <-> v if present."""
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)
        # the other way round
        if self.graph.get(v):
            for edge in self.graph[v]:
                if edge[1] == u:
                    self.graph[v].remove(edge)

    def dfs(self, s=-2, d=-1):
        """Iterative DFS from s (default: first vertex); stop early if d is reached."""
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        """Populate the graph with c random vertices (random c if -1)."""
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first traversal from s; returns vertices in visit order."""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        """Number of edges incident to u."""
        return len(self.graph[u])

    def cycle_nodes(self):
        """Return vertices that participate in some back-edge cycle found by DFS."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """True if the DFS from the first vertex detects a cycle."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        """Return every vertex in insertion order."""
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        """Wall-clock seconds taken by dfs(s, e)."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Wall-clock seconds taken by bfs(s)."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
68
1
from __future__ import annotations class a__ : """simple docstring""" def __init__( self , lowercase ) -> None: '''simple docstring''' A__ = data A__ = None A__ = None def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Node | None ) -> None: # In Order traversal of the tree '''simple docstring''' if tree: display(tree.left ) print(tree.data ) display(tree.right ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Node | None ) -> int: '''simple docstring''' return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0 def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Node ) -> bool: '''simple docstring''' if not tree: return True if tree.left and tree.right: return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right ) else: return not tree.left and not tree.right def lowerCAmelCase__ ( ) -> None: # Main function for testing. '''simple docstring''' A__ = Node(1 ) A__ = Node(2 ) A__ = Node(3 ) A__ = Node(4 ) A__ = Node(5 ) A__ = Node(6 ) A__ = Node(7 ) A__ = Node(8 ) A__ = Node(9 ) print(is_full_binary_tree(SCREAMING_SNAKE_CASE_ ) ) print(depth_of_tree(SCREAMING_SNAKE_CASE_ ) ) print("Tree is: " ) display(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": main()
68
import datasets

from .evaluate import evaluate

# NOTE: the decorator below reads _DESCRIPTION/_KWARGS_DESCRIPTION, so the three
# module constants must keep these distinct names.
_CITATION = """\
@article{hendrycks2021cuad,
    title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
    author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
    journal={arXiv preprint arXiv:2103.06268},
    year={2021}
}
"""

_DESCRIPTION = """
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510 commercial legal
contracts that have been manually labeled to identify 41 categories of important clauses that lawyers look for
when reviewing contracts in connection with corporate transactions.
"""

_KWARGS_DESCRIPTION = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
    predictions: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair as given in the references (see below)
        - 'prediction_text': list of possible texts for the answer, as a list of strings
        depending on a threshold on the confidence probability of each prediction.
    references: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair (see above),
        - 'answers': a Dict in the CUAD dataset format
            {
                'text': list of possible texts for the answer, as a list of strings
                'answer_start': list of start positions for the answer, as a list of ints
            }
        Note that answer_start values are not taken into account to compute the metric.
Returns:
    'exact_match': Exact match (the normalized answer exactly match the gold answer)
    'f1': The F-score of predicted tokens versus the gold answer
    'aupr': Area Under the Precision-Recall curve
    'prec_at_80_recall': Precision at 80% recall
    'prec_at_90_recall': Precision at 90% recall
Examples:
    >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> cuad_metric = datasets.load_metric("cuad")
    >>> results = cuad_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CUAD(datasets.Metric):
    """Wrapper metric around the official CUAD v1 scoring script."""

    def _info(self):
        # Declares the feature schema expected by `compute`.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        """Reshape predictions/references into CUAD format and run the official scorer."""
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
68
1
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging

logger = logging.get_logger(__name__)

if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    """CLIP image processor: resize -> center-crop -> rescale -> normalize pipeline."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # CLIP default: resize shortest edge to 224, then center-crop 224x224.
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals size["shortest_edge"], keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # module-level `resize` from image_transforms, not this method
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to (size["height"], size["width"])."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with per-channel mean/std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Run the full pipeline on one image or a batch; per-call args override defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
68
import torch
import torch.nn as nn

from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel

from ...utils import logging

logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    """Cosine similarity matrix between two embedding sets (rows L2-normalized)."""
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


class StableDiffusionSafetyChecker(PreTrainedModel):
    """Flags NSFW images by comparing CLIP image embeddings against concept embeddings."""

    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config):
        super().__init__(config)
        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        # Frozen concept embeddings and per-concept score thresholds (loaded from checkpoint).
        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)
        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        """Return (images, has_nsfw_concepts) — one bool per image in the batch."""
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    # NOTE(review): this appends a *set* literal; looks like it should be a
                    # (idx, score) tuple — preserved as in the upstream code.
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]
        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input, images):
        """Vectorized (ONNX-traceable) variant of `forward` with no Python-level loops."""
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
68
1
from math import ceil def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: Union[str, Any] ) -> Tuple: '''simple docstring''' A__ = list(range(0 , SCREAMING_SNAKE_CASE_ ) ) A__ = [item for sublist in list(device_map.values() ) for item in sublist] # Duplicate check A__ = [] for i in device_map_blocks: if device_map_blocks.count(SCREAMING_SNAKE_CASE_ ) > 1 and i not in duplicate_blocks: duplicate_blocks.append(SCREAMING_SNAKE_CASE_ ) # Missing blocks A__ = [i for i in blocks if i not in device_map_blocks] A__ = [i for i in device_map_blocks if i not in blocks] if len(SCREAMING_SNAKE_CASE_ ) != 0: raise ValueError( "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device." " These attention blocks were specified more than once: " + str(SCREAMING_SNAKE_CASE_ ) ) if len(SCREAMING_SNAKE_CASE_ ) != 0: raise ValueError( "There are attention blocks for this model that are not specified in the device_map. Add these attention " "blocks to a device on the device_map: " + str(SCREAMING_SNAKE_CASE_ ) ) if len(SCREAMING_SNAKE_CASE_ ) != 0: raise ValueError( "The device_map contains more attention blocks than this model has. Remove these from the device_map:" + str(SCREAMING_SNAKE_CASE_ ) ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Any , SCREAMING_SNAKE_CASE_: List[Any] ) -> Any: '''simple docstring''' A__ = list(range(SCREAMING_SNAKE_CASE_ ) ) A__ = int(ceil(n_layers / len(SCREAMING_SNAKE_CASE_ ) ) ) A__ = [layers[i : i + n_blocks] for i in range(0 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] return dict(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
68
from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, 
wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
68
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Lazy-import structure: configuration + slow tokenizer are always available;
# optional backends add their entries below.
_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}

# Fast tokenizer requires the `tokenizers` package.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]

# PyTorch modeling classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]

# TensorFlow modeling classes.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]

if TYPE_CHECKING:
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
68
import string


def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher: print the candidate plaintext for every key.

    For each of the 26 possible shift keys, every uppercase letter in *message*
    is shifted back by the key (wrapping around the alphabet); any character
    that is not an uppercase letter is passed through unchanged.  One candidate
    line is printed per key.

    (Fix: this function and ``main`` below were previously both defined under
    the same placeholder name, so the calls to ``decrypt``/``main`` raised
    NameError.)
    """
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol) - key
                if num < 0:
                    # wrap around past 'A'
                    num += len(string.ascii_uppercase)
                translated += string.ascii_uppercase[num]
            else:
                translated += symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    """Read an encrypted message from stdin and print all 26 candidate decryptions."""
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
68
1
from typing import Any


class Node:
    """A single element of a singly linked list."""

    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None  # pointer to the next Node, or None at the tail

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    """Singly linked list supporting indexing, insertion, deletion and reversal.

    (Fix: the classes were previously both named with a placeholder and every
    method shared one placeholder name, so all internal calls such as
    ``self.insert_nth`` and the test functions' use of ``LinkedList``/``Node``
    raised NameError.)
    """

    def __init__(self) -> None:
        self.head = None  # first Node, or None when the list is empty

    def __iter__(self) -> Any:
        """Yield the *data* stored in each node, from head to tail."""
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        """Number of nodes, counted by walking the list (O(n))."""
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        """Return the data stored at position *index* (0-based)."""
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        """Overwrite the data stored at position *index*."""
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        """Append *data* at the end of the list."""
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        """Prepend *data* at the front of the list."""
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        """Insert *data* so that it ends up at position *index* (0..len allowed)."""
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:
        """Print every node's data on one line (the list's repr)."""
        print(self)

    def delete_head(self) -> Any:
        """Remove and return the data of the first node."""
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        """Remove and return the data of the last node."""
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        """Remove the node at *index* and return its data."""
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        """True when the list has no nodes."""
        return self.head is None

    def reverse(self) -> None:
        """Reverse the list in place by re-pointing every ``next`` link."""
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev


def test_singly_linked_list() -> None:
    """Exercise insertion, deletion, indexing and reversal with integers."""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))


def test_singly_linked_list_2() -> None:
    """Exercise the list with heterogeneous data (Nodes, strings, floats, None)."""
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main() -> None:
    """Interactive demo: build, mutate and display a list from stdin input."""
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
68
import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ = get_tests_dir("""fixtures/test_sentencepiece_bpe_char.model""") @require_sentencepiece @require_tokenizers class a__ ( snake_case , unittest.TestCase ): """simple docstring""" __lowerCamelCase = SpeechTaTokenizer __lowerCamelCase = False __lowerCamelCase = True def UpperCamelCase ( self ) -> Any: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing A__ = SpeechTaTokenizer(lowercase ) A__ = AddedToken("<mask>" , lstrip=lowercase , rstrip=lowercase ) A__ = mask_token tokenizer.add_special_tokens({"mask_token": mask_token} ) tokenizer.add_tokens(["<ctc_blank>"] ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase ( self , lowercase ) -> Union[str, Any]: '''simple docstring''' A__ = "this is a test" A__ = "this is a test" return input_text, output_text def UpperCamelCase ( self , lowercase , lowercase=False , lowercase=20 , lowercase=5 ) -> Optional[Any]: '''simple docstring''' A__ , A__ = self.get_input_output_texts(lowercase ) A__ = tokenizer.encode(lowercase , add_special_tokens=lowercase ) A__ = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase ) return text, ids def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' A__ = "<pad>" A__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase ) , lowercase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase ) , lowercase ) def UpperCamelCase ( self ) -> List[str]: '''simple docstring''' A__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-4] , "œ" 
) self.assertEqual(vocab_keys[-2] , "<mask>" ) self.assertEqual(vocab_keys[-1] , "<ctc_blank>" ) self.assertEqual(len(lowercase ) , 81 ) def UpperCamelCase ( self ) -> Dict: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def UpperCamelCase ( self ) -> Optional[int]: '''simple docstring''' A__ = self.get_tokenizers(do_lower_case=lowercase ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): A__ = tokenizer.vocab_size A__ = len(lowercase ) self.assertNotEqual(lowercase , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) A__ = ["aaaaa bbbbbb", "cccccccccdddddddd"] A__ = tokenizer.add_tokens(lowercase ) A__ = tokenizer.vocab_size A__ = len(lowercase ) self.assertNotEqual(lowercase , 0 ) self.assertEqual(lowercase , lowercase ) self.assertEqual(lowercase , len(lowercase ) ) self.assertEqual(lowercase , all_size + len(lowercase ) ) A__ = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=lowercase ) self.assertGreaterEqual(len(lowercase ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) A__ = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"} A__ = tokenizer.add_special_tokens(lowercase ) A__ = tokenizer.vocab_size A__ = len(lowercase ) self.assertNotEqual(lowercase , 0 ) self.assertEqual(lowercase , lowercase ) self.assertEqual(lowercase , len(lowercase ) ) self.assertEqual(lowercase , all_size_a + len(lowercase ) ) A__ = tokenizer.encode( ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=lowercase ) self.assertGreaterEqual(len(lowercase ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) 
self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' pass def UpperCamelCase ( self ) -> Any: '''simple docstring''' pass def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' A__ = self.get_tokenizer() A__ = tokenizer.tokenize("This is a test" ) # fmt: off self.assertListEqual(lowercase , [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(lowercase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) A__ = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( lowercase , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] ) A__ = tokenizer.convert_tokens_to_ids(lowercase ) # fmt: off self.assertListEqual(lowercase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on A__ = tokenizer.convert_ids_to_tokens(lowercase ) self.assertListEqual( lowercase , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] ) @slow def UpperCamelCase ( self ) -> int: '''simple docstring''' A__ = [ "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides " "general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, 
XLNet...) for Natural " "Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained " "models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.", "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly " "conditioning on both left and right context in all layers.", "The quick brown fox jumps over the lazy dog.", ] # fmt: off A__ = { "input_ids": [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 
15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], "attention_mask": [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase , model_name="microsoft/speecht5_asr" , revision="c5ef64c71905caeccde0e4462ef3f9077224c524" , sequences=lowercase , )
68
1
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count simple paths from (row, col) to the bottom-right cell of *grid*.

    Moves are the four cardinal directions; cells containing 1 are walls.
    *visit* holds the cells on the current path so no cell is reused; it is
    restored (backtracked) before returning, so callers can pass a shared set.

    (Fix: the function was defined under a placeholder name while recursing on
    the name ``depth_first_search``, which raised NameError.)

    >>> depth_first_search([[0, 0], [0, 0]], 0, 0, set())
    2
    """
    row_length, col_length = len(grid), len(grid[0])
    # Out of bounds, already on the current path, or a wall: dead end.
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    # Reached the target corner: exactly one path ends here.
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))  # backtrack so other branches may use this cell
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
68
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path


def write_model_card(model_card_dir, src_lang, tgt_lang):
    """Render and write the README.md model card for one facebook/wmt19-* pair.

    *model_card_dir* is created if missing; *src_lang*/*tgt_lang* are the two
    language codes (e.g. "en", "ru") selecting the example sentence and the
    BLEU scores embedded in the card.

    (Fix: this function was defined under a placeholder name while the driver
    loop below calls ``write_model_card``, which raised NameError.)
    """
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'

    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# Guarded so importing this module has no side effects; writing the cards only
# happens when the script is executed directly (fix: previously ran at import).
if __name__ == "__main__":
    # make sure we are under the root of the project
    repo_dir = Path(__file__).resolve().parent.parent.parent
    model_cards_dir = repo_dir / "model_cards"

    for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
        base, src_lang, tgt_lang = model_name.split("-")
        model_card_dir = model_cards_dir / "facebook" / model_name
        write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
68
1
"""Convert original DiT (Document Image Transformer) checkpoints to the
HuggingFace BEiT format.  Run as a script; see the argparse options below."""
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    """Return (old_key, new_key) pairs mapping original DiT/BEiT checkpoint
    parameter names to the HuggingFace BEiT naming scheme.

    (Fix: all helpers in this file were defined under one placeholder name,
    so every internal call — including this one from convert_dit_checkpoint —
    raised NameError.)
    """
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
        ]
    )

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    """Split each layer's fused qkv projection into separate query/key/value
    tensors and move the gamma scaling parameters, all in place on *state_dict*.

    NOTE(review): destination key names follow the upstream BEiT converter
    (query/key/value under `attention.attention`, gammas as `lambda_1/2`).
    """
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values (the original checkpoint has no key bias)
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_a = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_b = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_a
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_b


def rename_key(dct, old, new):
    """Move the value stored under *old* to *new* in dict *dct* (in place)."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Download and return the standard COCO cats test image."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak a DiT checkpoint's weights to our BEiT structure, verify
    the output shape on a test image, and save (optionally push) the result.
    """
    # rvlcdip checkpoints are classifiers; all others carry a masked-LM head.
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    args = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
68
from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy lowerCAmelCase__ = logging.get_logger(__name__) class a__ ( snake_case ): """simple docstring""" def __init__( self , lowercase , lowercase , lowercase , **lowercase ) -> Union[str, Any]: '''simple docstring''' A__ = feature_size A__ = sampling_rate A__ = padding_value A__ = kwargs.pop("padding_side" , "right" ) A__ = kwargs.pop("return_attention_mask" , lowercase ) super().__init__(**lowercase ) def UpperCamelCase ( self , lowercase , lowercase = True , lowercase = None , lowercase = False , lowercase = None , lowercase = None , lowercase = None , ) -> BatchFeature: '''simple docstring''' if isinstance(lowercase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ): A__ = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`" F' to this method that includes {self.model_input_names[0]}, but you provided' F' {list(processed_features.keys() )}' ) A__ = processed_features[self.model_input_names[0]] A__ = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(lowercase ) == 0: if return_attention_mask: A__ = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch A__ = required_input[0] if isinstance(lowercase , (list, tuple) ): # first_element might be an empty list/tuple in some 
edge cases so we grab the first non empty element. A__ = 0 while len(required_input[index] ) == 0: index += 1 if index < len(lowercase ): A__ = required_input[index][0] if return_tensors is None: if is_tf_tensor(lowercase ): A__ = "tf" elif is_torch_tensor(lowercase ): A__ = "pt" elif isinstance(lowercase , (int, float, list, tuple, np.ndarray) ): A__ = "np" else: raise ValueError( F'type of {first_element} unknown: {type(lowercase )}. ' "Should be one of a python, numpy, pytorch or tensorflow object." ) for key, value in processed_features.items(): if isinstance(value[0] , (int, float) ): A__ = to_numpy(lowercase ) else: A__ = [to_numpy(lowercase ) for v in value] # Convert padding_strategy in PaddingStrategy A__ = self._get_padding_strategies(padding=lowercase , max_length=lowercase ) A__ = processed_features[self.model_input_names[0]] A__ = len(lowercase ) if not all(len(lowercase ) == batch_size for v in processed_features.values() ): raise ValueError("Some items in the output dictionary have a different batch size than others." 
) A__ = [] for i in range(lowercase ): A__ = {k: v[i] for k, v in processed_features.items()} # truncation A__ = self._truncate( lowercase , max_length=lowercase , pad_to_multiple_of=lowercase , truncation=lowercase , ) truncated_inputs.append(lowercase ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length A__ = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) A__ = PaddingStrategy.MAX_LENGTH A__ = {} for i in range(lowercase ): # padding A__ = self._pad( truncated_inputs[i] , max_length=lowercase , padding_strategy=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , ) for key, value in outputs.items(): if key not in batch_outputs: A__ = [] if value.dtype is np.dtype(np.floataa ): A__ = value.astype(np.floataa ) batch_outputs[key].append(lowercase ) return BatchFeature(lowercase , tensor_type=lowercase ) def UpperCamelCase ( self , lowercase , lowercase = None , lowercase = PaddingStrategy.DO_NOT_PAD , lowercase = None , lowercase = None , ) -> dict: '''simple docstring''' A__ = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: A__ = len(lowercase ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): A__ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of A__ = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowercase ) < max_length if return_attention_mask and "attention_mask" not in processed_features: A__ = np.ones(len(lowercase ) , dtype=np.intaa ) if needs_to_be_padded: A__ = max_length - len(lowercase ) if self.padding_side == "right": if return_attention_mask: A__ = np.pad( processed_features["attention_mask"] , (0, difference) ) A__ = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) A__ = np.pad( lowercase , lowercase , "constant" , constant_values=self.padding_value ) elif 
self.padding_side == "left": if return_attention_mask: A__ = np.pad( processed_features["attention_mask"] , (difference, 0) ) A__ = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) A__ = np.pad( lowercase , lowercase , "constant" , constant_values=self.padding_value ) else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return processed_features def UpperCamelCase ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , ) -> Union[str, Any]: '''simple docstring''' if not truncation: return processed_features elif truncation and max_length is None: raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." ) A__ = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): A__ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of A__ = len(lowercase ) > max_length if needs_to_be_truncated: A__ = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: A__ = processed_features["attention_mask"][:max_length] return processed_features def UpperCamelCase ( self , lowercase=False , lowercase=None ) -> Any: '''simple docstring''' if padding is not False: if padding is True: A__ = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(lowercase , lowercase ): A__ = PaddingStrategy(lowercase ) elif isinstance(lowercase , lowercase ): A__ = padding else: A__ = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( F'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined' ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( 
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use" " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." ) return padding_strategy
68
1
def hamming(n_element: int) -> list:
    """Return the first ``n_element`` Hamming numbers.

    Hamming numbers are of the form 2^i * 3^j * 5^k, generated in ascending
    order starting from 1.

    Args:
        n_element: how many Hamming numbers to produce (must be >= 1).

    Returns:
        list of the first ``n_element`` Hamming numbers.

    Raises:
        ValueError: if ``n_element`` is smaller than 1.
    """
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    # i, j, k index the smallest element whose multiple by 2/3/5 is still unused
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        # advance each pointer past candidates already covered by the list tail
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
68
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Lazy-import structure: maps submodule name -> public names it provides.
_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

# PyTorch models are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

# TensorFlow models are only registered when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy submodules
    # are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
68
1
from argparse import ArgumentParser

from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info


def parse_unknown_args(unknown_args):
    """Turn leftover CLI tokens ['--key', 'value', ...] into a {key: value} dict.

    Tokens are consumed pairwise; leading dashes are stripped from the keys.
    """
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    """Entry point for the ``datasets-cli`` tool: register subcommands, parse, dispatch."""
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args; unknown flags are forwarded to the selected command as kwargs.
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        # No subcommand given: show usage and exit with an error status.
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    main()
68
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    """Configuration class for a GPT-NeoX-Japanese model.

    Stores the hyper-parameters that define the model architecture; defaults
    mirror the abeja/gpt-neox-japanese-2.7b checkpoint.
    """

    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        # bos/eos token ids are handled by the base PretrainedConfig.
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # intermediate FFN width is expressed as a multiple of hidden_size
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
68
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    """Configuration class for a GPT-NeoX-Japanese model.

    Stores the hyper-parameters that define the model architecture; defaults
    mirror the abeja/gpt-neox-japanese-2.7b checkpoint.
    """

    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        # bos/eos token ids are handled by the base PretrainedConfig.
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # intermediate FFN width is expressed as a multiple of hidden_size
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
68
import warnings
from functools import wraps
from typing import Callable


def lowerCAmelCase__(fn: Callable) -> Callable:
    """Decorator flagging ``fn`` as experimental.

    Every call to the wrapped function emits a ``UserWarning`` saying the API
    may change, then delegates to ``fn`` unchanged.

    Args:
        fn: the callable to mark as experimental.

    Returns:
        A wrapper with the same signature, name and docstring (via ``wraps``).
    """

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            (f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."),
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
68
1
from __future__ import annotations


def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Swap ``array[index1]``/``array[index2]`` in place when they violate *direction*.

    direction == 1 means ascending order, direction == 0 means descending.
    """
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Merge the bitonic sequence ``array[low:low+length]`` into *direction* order, in place."""
    if length > 1:
        middle = length // 2
        # compare-and-swap each element with its partner half a block away
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Bitonic sort of ``array[low:low+length]`` in place.

    ``length`` must be a power of two. Builds a bitonic sequence by sorting
    the two halves in opposite directions, then merges it.
    """
    if length > 1:
        middle = length // 2
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
68
import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) lowerCAmelCase__ = """\ Text data. Second line of data.""" lowerCAmelCase__ = """file""" @pytest.fixture(scope="session" ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[Any] ) -> Optional[int]: '''simple docstring''' A__ = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd") A__ = bytes(SCREAMING_SNAKE_CASE_ , "utf-8" ) with zstd.open(SCREAMING_SNAKE_CASE_ , "wb" ) as f: f.write(SCREAMING_SNAKE_CASE_ ) return path @pytest.fixture def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Any ) -> List[str]: '''simple docstring''' with open(os.path.join(tmpfs.local_root_dir , SCREAMING_SNAKE_CASE_ ) , "w" ) as f: f.write(SCREAMING_SNAKE_CASE_ ) return FILE_PATH @pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] , SCREAMING_SNAKE_CASE_: Any , SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: Any , SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: int ) -> Any: '''simple docstring''' A__ = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path} A__ = input_paths[compression_format] A__ = tmp_path / "cache" A__ = DownloadConfig(cache_dir=SCREAMING_SNAKE_CASE_ , extract_compressed_file=SCREAMING_SNAKE_CASE_ ) A__ = cached_path(SCREAMING_SNAKE_CASE_ , download_config=SCREAMING_SNAKE_CASE_ ) with open(SCREAMING_SNAKE_CASE_ ) as f: A__ = f.read() with open(SCREAMING_SNAKE_CASE_ ) as f: A__ = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize("default_extracted" , [True, False] ) @pytest.mark.parametrize("default_cache_dir" , [True, False] ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] , SCREAMING_SNAKE_CASE_: 
List[Any] , SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: Tuple , SCREAMING_SNAKE_CASE_: str ) -> Dict: '''simple docstring''' A__ = "custom_cache" A__ = "custom_extracted_dir" A__ = tmp_path / "custom_extracted_path" if default_extracted: A__ = ("downloads" if default_cache_dir else custom_cache_dir, "extracted") else: monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , SCREAMING_SNAKE_CASE_ ) monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(SCREAMING_SNAKE_CASE_ ) ) A__ = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) A__ = xz_file A__ = ( DownloadConfig(extract_compressed_file=SCREAMING_SNAKE_CASE_ ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=SCREAMING_SNAKE_CASE_ ) ) A__ = cached_path(SCREAMING_SNAKE_CASE_ , download_config=SCREAMING_SNAKE_CASE_ ) assert Path(SCREAMING_SNAKE_CASE_ ).parent.parts[-2:] == expected def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] ) -> Optional[int]: '''simple docstring''' A__ = str(Path(SCREAMING_SNAKE_CASE_ ).resolve() ) assert cached_path(SCREAMING_SNAKE_CASE_ ) == text_file # relative path A__ = str(Path(SCREAMING_SNAKE_CASE_ ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(SCREAMING_SNAKE_CASE_ ) == text_file def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Dict ) -> List[str]: '''simple docstring''' A__ = str(tmp_path.resolve() / "__missing_file__.txt" ) with pytest.raises(SCREAMING_SNAKE_CASE_ ): cached_path(SCREAMING_SNAKE_CASE_ ) # relative path A__ = "./__missing_file__.txt" with pytest.raises(SCREAMING_SNAKE_CASE_ ): cached_path(SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str ) -> Union[str, Any]: '''simple docstring''' A__ = get_from_cache(F'tmp://{tmpfs_file}' ) with open(SCREAMING_SNAKE_CASE_ ) as f: A__ = f.read() assert output_file_content == FILE_CONTENT @patch("datasets.config.HF_DATASETS_OFFLINE" , 
SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase__ ( ) -> List[Any]: '''simple docstring''' with pytest.raises(SCREAMING_SNAKE_CASE_ ): cached_path("https://huggingface.co" ) @patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] ) -> int: '''simple docstring''' A__ = tmp_path_factory.mktemp("data" ) / "file.html" with pytest.raises(SCREAMING_SNAKE_CASE_ ): http_get("https://huggingface.co" , temp_file=SCREAMING_SNAKE_CASE_ ) with pytest.raises(SCREAMING_SNAKE_CASE_ ): http_head("https://huggingface.co" ) @patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Dict ) -> List[Any]: '''simple docstring''' A__ = tmp_path_factory.mktemp("data" ) / "file.html" with pytest.raises(SCREAMING_SNAKE_CASE_ ): ftp_get("ftp://huggingface.co" , temp_file=SCREAMING_SNAKE_CASE_ ) with pytest.raises(SCREAMING_SNAKE_CASE_ ): ftp_head("ftp://huggingface.co" ) @patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[str, Any] ) -> str: '''simple docstring''' A__ = tmp_path_factory.mktemp("data" ) / "file.html" with pytest.raises(SCREAMING_SNAKE_CASE_ ): fsspec_get("s3://huggingface.co" , temp_file=SCREAMING_SNAKE_CASE_ ) with pytest.raises(SCREAMING_SNAKE_CASE_ ): fsspec_head("s3://huggingface.co" )
68
1
def prefix_function(input_string: str) -> list:
    """Knuth–Morris–Pratt prefix function.

    For every index ``i`` the result holds the length of the longest proper
    prefix of ``input_string[:i+1]`` that is also a suffix of it.

    >>> prefix_function("aabcdaabc")
    [0, 1, 0, 0, 0, 1, 2, 3, 4]
    >>> prefix_function("asdasdad")
    [0, 0, 0, 1, 2, 3, 4, 0]
    """
    prefix_result = [0] * len(input_string)

    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]

        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j

    return prefix_result


def longest_prefix(input_str: str) -> int:
    """Return the length of the longest border of any prefix of ``input_str``.

    Robust to the empty string: returns 0 instead of raising on ``max([])``.

    >>> longest_prefix("aabcdaabc")
    4
    >>> longest_prefix("")
    0
    """
    return max(prefix_function(input_str), default=0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
68
from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class a__ : """simple docstring""" __lowerCamelCase = BlenderbotSmallConfig __lowerCamelCase = {} __lowerCamelCase = 'gelu' def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=False , lowercase=99 , lowercase=32 , lowercase=2 , lowercase=4 , lowercase=37 , lowercase=0.1 , lowercase=0.1 , lowercase=20 , lowercase=2 , lowercase=1 , lowercase=0 , ) -> Any: '''simple docstring''' A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_labels A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = eos_token_id A__ = pad_token_id A__ = bos_token_id def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) A__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) A__ = tf.concat([input_ids, eos_tensor] , axis=1 ) A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , 
encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) A__ = prepare_blenderbot_small_inputs_dict(lowercase , lowercase , lowercase ) return config, inputs_dict def UpperCamelCase ( self , lowercase , lowercase ) -> str: '''simple docstring''' A__ = TFBlenderbotSmallModel(config=lowercase ).get_decoder() A__ = inputs_dict["input_ids"] A__ = input_ids[:1, :] A__ = inputs_dict["attention_mask"][:1, :] A__ = inputs_dict["head_mask"] A__ = 1 # first forward pass A__ = model(lowercase , attention_mask=lowercase , head_mask=lowercase , use_cache=lowercase ) A__ , A__ = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids A__ = ids_tensor((self.batch_size, 3) , config.vocab_size ) A__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and A__ = tf.concat([input_ids, next_tokens] , axis=-1 ) A__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) A__ = model(lowercase , attention_mask=lowercase )[0] A__ = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice A__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) A__ = output_from_no_past[:, -3:, random_slice_idx] A__ = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase , lowercase , rtol=1e-3 ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int , SCREAMING_SNAKE_CASE_: List[Any] , SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: Optional[Any]=None , SCREAMING_SNAKE_CASE_: Optional[int]=None , SCREAMING_SNAKE_CASE_: Optional[int]=None , 
SCREAMING_SNAKE_CASE_: Dict=None , SCREAMING_SNAKE_CASE_: List[str]=None , ) -> List[Any]: '''simple docstring''' if attention_mask is None: A__ = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE_ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: A__ = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: A__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: A__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: A__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class a__ ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" __lowerCamelCase = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) __lowerCamelCase = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () __lowerCamelCase = ( { 'conversational': TFBlenderbotSmallForConditionalGeneration, 'feature-extraction': TFBlenderbotSmallModel, 'summarization': TFBlenderbotSmallForConditionalGeneration, 'text2text-generation': TFBlenderbotSmallForConditionalGeneration, 'translation': TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) __lowerCamelCase = True __lowerCamelCase = False __lowerCamelCase = False def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ = TFBlenderbotSmallModelTester(self ) A__ = ConfigTester(self , config_class=lowercase ) def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' self.config_tester.run_common_tests() 
def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase ) @require_tokenizers @require_tf class a__ ( unittest.TestCase ): """simple docstring""" __lowerCamelCase = [ 'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like ' ' i\'m going to throw up.\nand why is that?' ] __lowerCamelCase = 'facebook/blenderbot_small-90M' @cached_property def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" ) @cached_property def UpperCamelCase ( self ) -> Any: '''simple docstring''' A__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def UpperCamelCase ( self ) -> int: '''simple docstring''' A__ = self.tokenizer(self.src_text , return_tensors="tf" ) A__ = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowercase , ) A__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowercase )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
68
1
def euclidean_distance_sqr(point1, point2):
    """Squared Euclidean distance between two 2-D points (sqrt deferred to the end)."""
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    """Return *array* sorted by the given coordinate column (0 = x, 1 = y)."""
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute-force O(n^2) minimum squared distance over the first *points_counts* points."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """Minimum squared distance inside the middle strip.

    Each point only needs to be checked against at most 6 neighbours,
    per the classic closest-pair divide-and-conquer argument.
    """
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    """Divide-and-conquer closest pair; returns the squared distance."""
    # base case: brute force small inputs
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    # collect points within closest_pair_dis of the dividing vertical line
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    """Return the minimum Euclidean distance between any two of *points*."""
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
68
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


class BlipImageProcessor(BaseImageProcessor):
    """
    BLIP-style image processor.

    Pipeline (each step individually switchable): convert to RGB, resize to a
    fixed ``{"height", "width"}`` size, rescale pixel values (default 1/255),
    and normalize with the OpenAI CLIP mean/std.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Default target size is 384x384 (BLIP convention).
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image`` to ``(size["height"], size["width"])``."""
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(F'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}')
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize ``image`` channel-wise with ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        do_convert_rgb: bool = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """
        Preprocess one image or a batch of images into ``pixel_values``.

        Per-call arguments override the defaults stored on the processor.
        Returns a `BatchFeature` with key ``"pixel_values"``.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=True)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # FIX: the original condition `do_resize and size is None or resample is None`
        # binds as `(do_resize and size is None) or resample is None`, raising even
        # when do_resize is False; the parenthesized form is intended.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
68
1
def solution(power: int = 1_0_0_0) -> int:
    """Return the sum of the decimal digits of ``2**power``.

    Project Euler problem 16: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
    What is the digit sum of 2**1000?
    """
    # str() yields the decimal digits; convert each back to int and add.
    return sum(int(digit) for digit in str(2**power))


if __name__ == "__main__":
    power = int(input("""Enter the power of 2: """).strip())
    print("""2 ^ """, power, """ = """, 2**power)
    result = solution(power)
    print("""Sum of the digits is: """, result)
68
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from requests.exceptions import HTTPError

from transformers.utils import (
    CONFIG_NAME,
    FLAX_WEIGHTS_NAME,
    TF2_WEIGHTS_NAME,
    TRANSFORMERS_CACHE,
    WEIGHTS_NAME,
    cached_file,
    get_file_from_repo,
    has_file,
)


# Tiny test repo on the Hub, its local cache directory, and the full hash of
# the commit that the short revision "9b8c223" below resolves to.
RANDOM_BERT = """hf-internal-testing/tiny-random-bert"""
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""")
FULL_COMMIT_HASH = """9b8c223d42b2188cb49d29af482996f9d0f3e5a6"""


class GetFromCacheTests(unittest.TestCase):
    """Integration tests for `cached_file` / `get_file_from_repo` / `has_file`.

    These hit the Hugging Face Hub, so they require network access.
    """

    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)

        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")

        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        # The failed lookup leaves a marker file in the .no_exist folder.
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))

            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
68
1
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize("dataset_size", [None, 4_0_0 * 2**2_0, 6_0_0 * 2**2_0])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 1_0_0 * 2**2_0, 9_0_0 * 2**2_0])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    """`is_small_dataset` is True only when both the dataset size and the
    configured IN_MEMORY_MAX_SIZE are truthy and size < max size."""
    # "default" means: leave datasets.config.IN_MEMORY_MAX_SIZE untouched.
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    # Expected: small only if both values are truthy and size fits the limit.
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
68
import gc
import unittest

import torch
from parameterized import parameterized

from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


enable_full_determinism()


class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    """Fast unit tests for the AutoencoderKL model."""

    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample

        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample

        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))

    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))


@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    """Slow integration tests against the Stable Diffusion VAE checkpoint."""

    def get_file_format(self, seed, shape):
        return F'gaussian_noise_s={seed}_shape={"_".join([str(s) for s in shape])}.npy'

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()

        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)

    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)

    @parameterized.expand([(13,), (16,), (27,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)

    @parameterized.expand([(13,), (16,), (37,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
68
1
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union

import torch

from .tensor_utils import tensor_tree_map, tree_map


def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    """Recursively collect the `.shape` of every tensor in a nested structure."""
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes


@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    """Convert a flat index into a multi-dimensional index for shape `dims`."""
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))


@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    """
    Produce an ordered, minimal sequence of slice tuples that, applied one
    after another, cover exactly the elements between the multi-dimensional
    (inclusive) indices `start` and `end` of a tensor with batch shape `dims`.
    """

    # Mark which dims lie flush against the tensor edge; a dim only counts as
    # "edge" if every dim after it is also at the edge (hence the back-to-front
    # AND sweep).
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices = []
    path_list = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices


@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    """
    Equivalent to

        t.reshape((-1,) + t.shape[no_batch_dims:])[flat_start:flat_end]

    but without a flattening reshape of the underlying storage, which would
    force a copy for non-contiguous tensors.
    """
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])


def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    """
    Run `layer` over `inputs` in chunks of size `chunk_size` along the
    (flattened) leading `no_batch_dims` batch dimensions, reassembling the
    chunked outputs into tensors with the original batch shape.

    Args:
        layer: callable invoked as ``layer(**chunk)`` for each chunk.
        inputs: (possibly nested) dict of input tensors sharing the leading
            batch dimensions (size-1 dims are broadcast to the max).
        chunk_size: number of flattened batch elements per call.
        no_batch_dims: how many leading dims count as batch dims.
        low_mem: avoid the up-front flattening reshape (slower, less memory).
        _out: optional pre-allocated output structure to write into.
        _add_into_out: accumulate into `_out` instead of overwriting.
    """
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    # Broadcast target: per-dimension max over all input tensors.
    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])

        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    # Ceiling division: one extra chunk for the remainder, if any.
    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        # Size-1 leading dims are broadcast, not sliced.
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    # Restore the original (unflattened) batch shape.
    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out


class ChunkSizeTuner:
    """Binary-searches the largest chunk size that runs without a RuntimeError
    (e.g. OOM), caching the result for a given argument signature."""

    def __init__(
        self,
        # Heuristically, runtimes grow sharply above this point.
        max_chunk_size: int = 512,
    ):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        # Candidate sizes: powers of two up to max_chunk_size, above the floor.
        candidates = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(ac1) == type(ac2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                # Compare dict values in key order so ordering doesn't matter.
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent

    def tune_chunk_size(
        self,
        representative_fn: Callable,
        args: tuple,
        min_chunk_size: int,
    ) -> int:
        consistent = True
        # Replace tensors by their shapes for cheap structural comparison.
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
68
# NOTE(review): this module is machine-obfuscated residue of a token-classification
# task-definition file (CoNLL-style NER/Chunk readers and a CoNLL-U POS reader).
# The obfuscation collapsed every parameter name to `lowercase` (duplicate
# parameter names are a SyntaxError in Python) and every local binding to `A__`,
# while the bodies still reference the original names (`mode`, `words`,
# `labels`, ...).  As written the module cannot compile; comments below document
# the apparent intent only.
import logging
import os
from typing import List, TextIO, Union

from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask


# Module logger (name collapsed by the obfuscator).
lowerCAmelCase__ = logging.getLogger(__name__)


class a__(snake_case):
    # NOTE(review): base class `snake_case` is obfuscation residue — presumably
    # TokenClassificationTask; TODO confirm against the original repository.
    """CoNLL-2003-style NER task: whitespace-separated token/label columns."""

    def __init__(self, lowercase=-1) -> Optional[Any]:
        # Column index of the label within each CoNLL line (default: last column).
        # NOTE(review): `label_idx` is undefined here — the parameter was renamed
        # to `lowercase`; the original was presumably `self.label_idx = label_idx`.
        A__ = label_idx

    def UpperCamelCase(self, lowercase, lowercase) -> List[InputExample]:
        """Read `{mode}.txt` from a data directory into InputExample objects.

        NOTE(review): the duplicate `lowercase` parameters are a SyntaxError;
        the original signature was presumably (self, data_dir, mode).
        """
        if isinstance(lowercase, lowercase):
            # Accept a Split enum as well as a plain string mode name.
            A__ = mode.value
        A__ = os.path.join(lowercase, F'{mode}.txt')
        A__ = 1
        A__ = []
        with open(lowercase, encoding="utf-8") as f:
            A__ = []
            A__ = []
            for line in f:
                # Blank lines / document markers end the current sentence.
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=F'{mode}-{guid_index}', words=lowercase, labels=lowercase))
                        guid_index += 1
                    A__ = []
                    A__ = []
                else:
                    A__ = line.split(" ")
                    words.append(splits[0])
                    if len(lowercase) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                # Flush the trailing sentence (file may not end with a blank line).
                examples.append(InputExample(guid=F'{mode}-{guid_index}', words=lowercase, labels=lowercase))
        return examples

    def UpperCamelCase(self, lowercase, lowercase, lowercase) -> Optional[Any]:
        """Write predictions next to the original test lines (CoNLL format)."""
        A__ = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(lowercase)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                # Emit "<token> <predicted-label>" for the next token of the sentence.
                A__ = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(lowercase)
            else:
                # Tokens truncated away by the model get no prediction.
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def UpperCamelCase(self, lowercase) -> List[str]:
        """Return the label set from a labels file if given, else CoNLL-2003 defaults."""
        if path:
            with open(lowercase, "r") as f:
                A__ = f.read().splitlines()
            if "O" not in labels:
                # "O" (outside) must always be present.
                A__ = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]


class a__(snake_case):
    # NOTE(review): re-binds `a__`, shadowing the NER class above — obfuscation
    # artifact; originally this was a distinct Chunk class inheriting from NER.
    """Chunking task: the label lives in the second-to-last CoNLL column."""

    def __init__(self) -> Union[str, Any]:
        # Chunk labels are the second-to-last column.
        super().__init__(label_idx=-2)

    def UpperCamelCase(self, lowercase) -> List[str]:
        """Return the chunking label set (file override or CoNLL-2000 defaults)."""
        if path:
            with open(lowercase, "r") as f:
                A__ = f.read().splitlines()
            if "O" not in labels:
                A__ = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]


class a__(snake_case):
    # NOTE(review): re-binds `a__` again, shadowing both classes above.
    """POS tagging task over CoNLL-U files (streamed via `conllu.parse_incr`)."""

    def UpperCamelCase(self, lowercase, lowercase) -> List[InputExample]:
        """Read `{mode}.txt` in CoNLL-U format; one example per sentence (UPOS labels)."""
        if isinstance(lowercase, lowercase):
            A__ = mode.value
        A__ = os.path.join(lowercase, F'{mode}.txt')
        A__ = 1
        A__ = []
        with open(lowercase, encoding="utf-8") as f:
            for sentence in parse_incr(lowercase):
                A__ = []
                A__ = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                # Every token must carry a UPOS tag.
                assert len(lowercase) == len(lowercase)
                if words:
                    examples.append(InputExample(guid=F'{mode}-{guid_index}', words=lowercase, labels=lowercase))
                    guid_index += 1
        return examples

    def UpperCamelCase(self, lowercase, lowercase, lowercase) -> List[Any]:
        """Write "<form> (<gold-upos>|<pred>)" for every token of every sentence."""
        A__ = 0
        for sentence in parse_incr(lowercase):
            A__ = preds_list[example_id]
            A__ = ""
            for token in sentence:
                out += F'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(lowercase)
            example_id += 1

    def UpperCamelCase(self, lowercase) -> List[str]:
        """Return the UPOS tag set (file override or the 17 universal tags)."""
        if path:
            with open(lowercase, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
68
1
import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers lowerCAmelCase__ = """python tqdm regex requests packaging filelock numpy tokenizers""".split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append("""dataclasses""") if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append("""importlib_metadata""") for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""") def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int , SCREAMING_SNAKE_CASE_: Optional[Any]=None ) -> Tuple: '''simple docstring''' require_version(deps[pkg] , SCREAMING_SNAKE_CASE_ )
68
import random class a__ : """simple docstring""" @staticmethod def UpperCamelCase ( lowercase ) -> tuple[list[int], list[int]]: '''simple docstring''' A__ = [ord(lowercase ) for i in text] A__ = [] A__ = [] for i in plain: A__ = random.randint(1 , 300 ) A__ = (i + k) * k cipher.append(lowercase ) key.append(lowercase ) return cipher, key @staticmethod def UpperCamelCase ( lowercase , lowercase ) -> str: '''simple docstring''' A__ = [] for i in range(len(lowercase ) ): A__ = int((cipher[i] - (key[i]) ** 2) / key[i] ) plain.append(chr(lowercase ) ) return "".join(lowercase ) if __name__ == "__main__": lowerCAmelCase__ , lowerCAmelCase__ = Onepad().encrypt("""Hello""") print(c, k) print(Onepad().decrypt(c, k))
68
1
# NOTE(review): this module is machine-obfuscated residue of a SpeechT5
# feature-extractor test suite.  Parameter names were collapsed to `lowercase` /
# `SCREAMING_SNAKE_CASE_` (duplicate parameter names are a SyntaxError), locals
# were collapsed to `A__` while bodies still use the original names, dtype names
# were mangled to `floataa` (originally float32/float64), every test method
# shares the name `UpperCamelCase`, and annotation names (Optional, Tuple, ...)
# are used without a `typing` import.  As written the module cannot compile;
# comments document the apparent intent only.
import itertools
import random
import unittest

import numpy as np

from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_torch_available():
    import torch

# Shared RNG used when callers do not pass one (originally `global_rng`).
lowerCAmelCase__ = random.Random()


def lowerCAmelCase__(SCREAMING_SNAKE_CASE_: str, SCREAMING_SNAKE_CASE_: Optional[int]=1.0, SCREAMING_SNAKE_CASE_: Tuple=None, SCREAMING_SNAKE_CASE_: Dict=None) -> List[Any]:
    """Build a shape[0] x shape[1] nested list of random floats scaled by `scale`.

    NOTE(review): the body refers to the original parameter names
    (`shape`, `scale`, `rng`) and module global `global_rng`, all of which
    were renamed away by the obfuscator.  Originally `floats_list`.
    """
    if rng is None:
        A__ = global_rng
    A__ = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values


@require_torch
class a__(unittest.TestCase):
    """Config/fixture holder for the feature-extractor tests (originally
    `SpeechTaFeatureExtractionTester`)."""

    def __init__(self, lowercase, lowercase=7, lowercase=400, lowercase=2000, lowercase=1, lowercase=0.0, lowercase=16000, lowercase=True, lowercase=80, lowercase=16, lowercase=64, lowercase="hann_window", lowercase=80, lowercase=7600, lowercase=1e-10, lowercase=True, ) -> Union[str, Any]:
        # NOTE(review): RHS names below are the original parameter names
        # (parent, batch_size, ...) which no longer exist after obfuscation.
        A__ = parent
        A__ = batch_size
        A__ = min_seq_length
        A__ = max_seq_length
        # Step so that batch items increase linearly from min to max length.
        A__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        A__ = feature_size
        A__ = padding_value
        A__ = sampling_rate
        A__ = do_normalize
        A__ = num_mel_bins
        A__ = hop_length
        A__ = win_length
        A__ = win_function
        A__ = fmin
        A__ = fmax
        A__ = mel_floor
        A__ = return_attention_mask

    def UpperCamelCase(self) -> str:
        """Kwargs dict used to instantiate the feature extractor under test."""
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def UpperCamelCase(self, lowercase=False, lowercase=False) -> Union[str, Any]:
        """Waveform fixtures (originally prepare_inputs_for_common(equal_length, numpify))."""
        def _flatten(lowercase):
            return list(itertools.chain(*lowercase))

        if equal_length:
            A__ = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            A__ = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            A__ = [np.asarray(lowercase) for x in speech_inputs]
        return speech_inputs

    def UpperCamelCase(self, lowercase=False, lowercase=False) -> Union[str, Any]:
        """Mel-spectrogram target fixtures (originally prepare_inputs_for_target)."""
        if equal_length:
            A__ = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            A__ = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            A__ = [np.asarray(lowercase) for x in speech_inputs]
        return speech_inputs


@require_torch
class a__(snake_case, unittest.TestCase):
    """Feature-extractor test case (base `snake_case` is obfuscation residue —
    presumably SequenceFeatureExtractionTestMixin).  NOTE(review): every test
    method below was renamed to `UpperCamelCase`, so later defs shadow earlier
    ones; the one-line docstrings record each method's original purpose."""

    # Class under test, consumed by the mixin.
    __lowerCamelCase = SpeechTaFeatureExtractor

    def UpperCamelCase(self) -> int:
        """setUp: build the fixture helper (class name is residue — see above)."""
        A__ = SpeechTaFeatureExtractionTester(self)

    def UpperCamelCase(self, lowercase) -> Dict:
        """Assert per-feature mean ≈ 0 and variance ≈ 1 (normalization check)."""
        self.assertTrue(np.all(np.mean(lowercase, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(lowercase, axis=0) - 1) < 1e-3))

    def UpperCamelCase(self) -> Optional[Any]:
        """test_call: list vs numpy inputs produce identical features."""
        A__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        A__ = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        A__ = [np.asarray(lowercase) for speech_input in speech_inputs]
        # Test not batched input
        A__ = feat_extract(speech_inputs[0], return_tensors="np").input_values
        A__ = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(lowercase, lowercase, atol=1e-3))
        # Test batched
        A__ = feat_extract(lowercase, return_tensors="np").input_values
        A__ = feat_extract(lowercase, return_tensors="np").input_values
        for enc_seq_a, enc_seq_a in zip(lowercase, lowercase):
            self.assertTrue(np.allclose(lowercase, lowercase, atol=1e-3))

    def UpperCamelCase(self) -> Tuple:
        """Zero-mean/unit-variance holds under each padding strategy."""
        A__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        A__ = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        A__ = ["longest", "max_length", "do_not_pad"]
        A__ = [None, 1600, None]
        for max_length, padding in zip(lowercase, lowercase):
            A__ = feat_extract(lowercase, padding=lowercase, max_length=lowercase, return_tensors="np")
            A__ = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            # NOTE(review): indexing [0] here looks like an upstream typo — the
            # 1000-sample item is index 1; presumably input_values[1][1000:].
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def UpperCamelCase(self) -> Dict:
        """Same normalization check driven by an explicit lengths range."""
        A__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        A__ = range(800, 1400, 200)
        A__ = [floats_list((1, x))[0] for x in lengths]
        A__ = ["longest", "max_length", "do_not_pad"]
        A__ = [None, 1600, None]
        for max_length, padding in zip(lowercase, lowercase):
            A__ = feat_extract(lowercase, max_length=lowercase, padding=lowercase)
            A__ = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def UpperCamelCase(self) -> Optional[int]:
        """Truncation to max_length with padding="max_length"."""
        A__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        A__ = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        A__ = feat_extract(
            lowercase, truncation=lowercase, max_length=1000, padding="max_length", return_tensors="np"
        )
        A__ = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def UpperCamelCase(self) -> str:
        """padding="longest" interaction with max_length (both smaller and larger)."""
        A__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        A__ = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        A__ = feat_extract(
            lowercase, truncation=lowercase, max_length=1000, padding="longest", return_tensors="np"
        )
        A__ = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])
        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))
        A__ = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        A__ = feat_extract(
            lowercase, truncation=lowercase, max_length=2000, padding="longest", return_tensors="np"
        )
        A__ = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])
        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    def UpperCamelCase(self) -> int:
        """Double-precision inputs keep their dtype through pad() (np and pt).
        NOTE(review): `floataa` is mangled — originally float64/float32."""
        A__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        A__ = np.random.rand(100).astype(np.floataa)
        A__ = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            A__ = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.floataa)
            A__ = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa)

    def UpperCamelCase(self) -> Union[str, Any]:
        """test_call_target: audio_target path yields (batch, frames, mel_bins)."""
        A__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        A__ = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        A__ = [np.asarray(lowercase) for speech_input in speech_inputs]
        # Test feature size
        A__ = feature_extractor(audio_target=lowercase, padding=lowercase, return_tensors="np").input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)
        # Test not batched input
        A__ = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        A__ = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(lowercase, lowercase, atol=1e-3))
        # Test batched
        A__ = feature_extractor(lowercase, return_tensors="np").input_values
        A__ = feature_extractor(lowercase, return_tensors="np").input_values
        for enc_seq_a, enc_seq_a in zip(lowercase, lowercase):
            self.assertTrue(np.allclose(lowercase, lowercase, atol=1e-3))
        # Test 2-D numpy arrays are batched.
        A__ = [floats_list((1, x))[0] for x in (800, 800, 800)]
        A__ = np.asarray(lowercase)
        A__ = feature_extractor(lowercase, return_tensors="np").input_values
        A__ = feature_extractor(lowercase, return_tensors="np").input_values
        for enc_seq_a, enc_seq_a in zip(lowercase, lowercase):
            self.assertTrue(np.allclose(lowercase, lowercase, atol=1e-3))

    def UpperCamelCase(self) -> Tuple:
        """BatchFeature wraps targets unchanged; np tensor conversion shapes correctly."""
        A__ = self.feat_extract_tester.prepare_inputs_for_target()
        A__ = self.feature_extraction_class(**self.feat_extract_dict)
        A__ = feat_extract.model_input_names[0]
        A__ = BatchFeature({input_name: speech_inputs})
        self.assertTrue(all(len(lowercase) == len(lowercase) for x, y in zip(lowercase, processed_features[input_name])))
        A__ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowercase)
        A__ = BatchFeature({input_name: speech_inputs}, tensor_type="np")
        A__ = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            A__ = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def UpperCamelCase(self) -> Tuple:
        """Same as above with tensor_type="pt"."""
        A__ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowercase)
        A__ = self.feature_extraction_class(**self.feat_extract_dict)
        A__ = feat_extract.model_input_names[0]
        A__ = BatchFeature({input_name: speech_inputs}, tensor_type="pt")
        A__ = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            A__ = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def UpperCamelCase(self) -> Any:
        """pad() gives numerically matching results for np and pt tensors."""
        A__ = self.feature_extraction_class(**self.feat_extract_dict)
        A__ = self.feat_extract_tester.prepare_inputs_for_target()
        A__ = feat_extract.model_input_names[0]
        A__ = BatchFeature({input_name: speech_inputs})
        A__ = feat_extract.num_mel_bins  # hack!
        A__ = feat_extract.pad(lowercase, padding="longest", return_tensors="np")[input_name]
        A__ = feat_extract.pad(lowercase, padding="longest", return_tensors="pt")[input_name]
        self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_pt.numpy().astype(np.floataa).sum()) < 1e-2)

    def UpperCamelCase(self) -> Any:
        """attention_mask is returned and its row sums equal the input lengths."""
        A__ = self.feat_extract_dict
        A__ = True
        A__ = self.feature_extraction_class(**lowercase)
        A__ = self.feat_extract_tester.prepare_inputs_for_target()
        A__ = [len(lowercase) for x in speech_inputs]
        A__ = feat_extract.model_input_names[0]
        A__ = BatchFeature({input_name: speech_inputs})
        A__ = feat_extract.num_mel_bins  # hack!
        A__ = feat_extract.pad(lowercase, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", lowercase)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), lowercase)

    def UpperCamelCase(self) -> Any:
        """attention_mask under truncation to max_length."""
        A__ = self.feat_extract_dict
        A__ = True
        A__ = self.feature_extraction_class(**lowercase)
        A__ = self.feat_extract_tester.prepare_inputs_for_target()
        A__ = [len(lowercase) for x in speech_inputs]
        A__ = feat_extract.model_input_names[0]
        A__ = BatchFeature({input_name: speech_inputs})
        A__ = min(lowercase)
        A__ = feat_extract.num_mel_bins  # hack!
        A__ = feat_extract.pad(
            lowercase, padding="max_length", max_length=lowercase, truncation=lowercase, return_tensors="np"
        )
        self.assertIn("attention_mask", lowercase)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )

    def UpperCamelCase(self, lowercase) -> Tuple:
        """Load `num_samples` waveforms from the dummy LibriSpeech dataset."""
        from datasets import load_dataset

        A__ = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        A__ = ds.sort("id").select(range(lowercase))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def UpperCamelCase(self) -> Any:
        """Integration: waveform path matches reference values on real audio."""
        # Expected first 30 normalized samples (underscored digits = residue of
        # an `# fmt: off` block).
        A__ = torch.tensor(
            [2.38_04e-03, 2.07_52e-03, 1.98_36e-03, 2.10_57e-03, 1.61_74e-03, 3.05_18e-04, 9.15_53e-05, 3.35_69e-04, 9.76_56e-04, 1.83_11e-03, 2.01_42e-03, 2.10_57e-03, 1.73_95e-03, 4.57_76e-04, -3.96_73e-04, 4.57_76e-04, 1.00_71e-03, 9.15_53e-05, 4.88_28e-04, 1.15_97e-03, 7.32_42e-04, 9.46_04e-04, 1.80_05e-03, 1.83_11e-03, 8.85_01e-04, 4.27_25e-04, 4.88_28e-04, 7.32_42e-04, 1.09_86e-03, 2.10_57e-03]
        )
        # fmt: on
        A__ = self._load_datasamples(1)
        A__ = SpeechTaFeatureExtractor()
        A__ = feature_extractor(lowercase, return_tensors="pt").input_values
        # NOTE(review): assertEquals is the deprecated alias of assertEqual.
        self.assertEquals(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], lowercase, atol=1e-6))

    def UpperCamelCase(self) -> List[str]:
        """Integration: mel-target path matches reference values on real audio."""
        A__ = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777, -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386, -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571, -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on
        A__ = self._load_datasamples(1)
        A__ = SpeechTaFeatureExtractor()
        A__ = feature_extractor(audio_target=lowercase, return_tensors="pt").input_values
        self.assertEquals(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], lowercase, atol=1e-4))
68
def lowerCAmelCase__ ( ) -> Any: '''simple docstring''' for n in range(1 , 1_0_0_0_0_0_0 ): yield n * (n + 1) // 2 def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Tuple ) -> Any: '''simple docstring''' A__ = 1 A__ = 2 while i * i <= n: A__ = 0 while n % i == 0: n //= i multiplicity += 1 divisors_count *= multiplicity + 1 i += 1 if n > 1: divisors_count *= 2 return divisors_count def lowerCAmelCase__ ( ) -> Dict: '''simple docstring''' return next(i for i in triangle_number_generator() if count_divisors(SCREAMING_SNAKE_CASE_ ) > 5_0_0 ) if __name__ == "__main__": print(solution())
68
1
from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig lowerCAmelCase__ = logging.get_logger(__name__) # General docstring lowerCAmelCase__ = """MobileNetV1Config""" # Base docstring lowerCAmelCase__ = """google/mobilenet_v1_1.0_224""" lowerCAmelCase__ = [1, 1_0_2_4, 7, 7] # Image classification docstring lowerCAmelCase__ = """google/mobilenet_v1_1.0_224""" lowerCAmelCase__ = """tabby, tabby cat""" lowerCAmelCase__ = [ """google/mobilenet_v1_1.0_224""", """google/mobilenet_v1_0.75_192""", # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Any , SCREAMING_SNAKE_CASE_: Dict , SCREAMING_SNAKE_CASE_: Optional[Any]=None ) -> Dict: '''simple docstring''' A__ = {} if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): A__ = model.mobilenet_va else: A__ = model A__ = "MobilenetV1/Conv2d_0/" A__ = backbone.conv_stem.convolution.weight A__ = backbone.conv_stem.normalization.bias A__ = backbone.conv_stem.normalization.weight A__ = backbone.conv_stem.normalization.running_mean A__ = backbone.conv_stem.normalization.running_var for i in range(1_3 ): A__ = i + 1 A__ = i * 2 A__ = backbone.layer[pt_index] A__ = F'MobilenetV1/Conv2d_{tf_index}_depthwise/' A__ = pointer.convolution.weight A__ = pointer.normalization.bias A__ = pointer.normalization.weight A__ = pointer.normalization.running_mean A__ = pointer.normalization.running_var A__ = backbone.layer[pt_index + 1] A__ = F'MobilenetV1/Conv2d_{tf_index}_pointwise/' A__ = pointer.convolution.weight A__ = 
pointer.normalization.bias A__ = pointer.normalization.weight A__ = pointer.normalization.running_mean A__ = pointer.normalization.running_var if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): A__ = "MobilenetV1/Logits/Conv2d_1c_1x1/" A__ = model.classifier.weight A__ = model.classifier.bias return tf_to_pt_map def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[Any] , SCREAMING_SNAKE_CASE_: int , SCREAMING_SNAKE_CASE_: str ) -> Union[str, Any]: '''simple docstring''' try: import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." ) raise # Load weights from TF model A__ = tf.train.list_variables(SCREAMING_SNAKE_CASE_ ) A__ = {} for name, shape in init_vars: logger.info(F'Loading TF weight {name} with shape {shape}' ) A__ = tf.train.load_variable(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A__ = array # Build TF to PyTorch weights loading map A__ = _build_tf_to_pytorch_map(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for name, pointer in tf_to_pt_map.items(): logger.info(F'Importing {name}' ) if name not in tf_weights: logger.info(F'{name} not in tf pre-trained weights, skipping' ) continue A__ = tf_weights[name] if "depthwise_weights" in name: logger.info("Transposing depthwise" ) A__ = np.transpose(SCREAMING_SNAKE_CASE_ , (2, 3, 0, 1) ) elif "weights" in name: logger.info("Transposing" ) if len(pointer.shape ) == 2: # copying into linear layer A__ = array.squeeze().transpose() else: A__ = np.transpose(SCREAMING_SNAKE_CASE_ , (3, 2, 0, 1) ) if pointer.shape != array.shape: raise ValueError(F'Pointer shape {pointer.shape} and array shape {array.shape} mismatched' ) logger.info(F'Initialize PyTorch weight {name} {array.shape}' ) A__ = torch.from_numpy(SCREAMING_SNAKE_CASE_ ) tf_weights.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) 
tf_weights.pop(name + "/RMSProp" , SCREAMING_SNAKE_CASE_ ) tf_weights.pop(name + "/RMSProp_1" , SCREAMING_SNAKE_CASE_ ) tf_weights.pop(name + "/ExponentialMovingAverage" , SCREAMING_SNAKE_CASE_ ) logger.info(F'Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}' ) return model def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: torch.Tensor , SCREAMING_SNAKE_CASE_: nn.Convad ) -> torch.Tensor: '''simple docstring''' A__ , A__ = features.shape[-2:] A__ , A__ = conv_layer.stride A__ , A__ = conv_layer.kernel_size if in_height % stride_height == 0: A__ = max(kernel_height - stride_height , 0 ) else: A__ = max(kernel_height - (in_height % stride_height) , 0 ) if in_width % stride_width == 0: A__ = max(kernel_width - stride_width , 0 ) else: A__ = max(kernel_width - (in_width % stride_width) , 0 ) A__ = pad_along_width // 2 A__ = pad_along_width - pad_left A__ = pad_along_height // 2 A__ = pad_along_height - pad_top A__ = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , "constant" , 0.0 ) class a__ ( nn.Module ): """simple docstring""" def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase = 1 , lowercase = 1 , lowercase = False , lowercase = True , lowercase = True , ) -> None: '''simple docstring''' super().__init__() A__ = config if in_channels % groups != 0: raise ValueError(F'Input channels ({in_channels}) are not divisible by {groups} groups.' ) if out_channels % groups != 0: raise ValueError(F'Output channels ({out_channels}) are not divisible by {groups} groups.' 
) A__ = 0 if config.tf_padding else int((kernel_size - 1) / 2 ) A__ = nn.Convad( in_channels=lowercase , out_channels=lowercase , kernel_size=lowercase , stride=lowercase , padding=lowercase , groups=lowercase , bias=lowercase , padding_mode="zeros" , ) if use_normalization: A__ = nn.BatchNormad( num_features=lowercase , eps=config.layer_norm_eps , momentum=0.9997 , affine=lowercase , track_running_stats=lowercase , ) else: A__ = None if use_activation: if isinstance(lowercase , lowercase ): A__ = ACTaFN[use_activation] elif isinstance(config.hidden_act , lowercase ): A__ = ACTaFN[config.hidden_act] else: A__ = config.hidden_act else: A__ = None def UpperCamelCase ( self , lowercase ) -> torch.Tensor: '''simple docstring''' if self.config.tf_padding: A__ = apply_tf_padding(lowercase , self.convolution ) A__ = self.convolution(lowercase ) if self.normalization is not None: A__ = self.normalization(lowercase ) if self.activation is not None: A__ = self.activation(lowercase ) return features class a__ ( snake_case ): """simple docstring""" __lowerCamelCase = MobileNetVaConfig __lowerCamelCase = load_tf_weights_in_mobilenet_va __lowerCamelCase = 'mobilenet_v1' __lowerCamelCase = 'pixel_values' __lowerCamelCase = False def UpperCamelCase ( self , lowercase ) -> None: '''simple docstring''' if isinstance(lowercase , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(lowercase , nn.BatchNormad ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) lowerCAmelCase__ = R""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ lowerCAmelCase__ = R""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`MobileNetV1ImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( 'The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.' , snake_case , ) class a__ ( snake_case ): """simple docstring""" def __init__( self , lowercase , lowercase = True ) -> Union[str, Any]: '''simple docstring''' super().__init__(lowercase ) A__ = config A__ = 32 A__ = max(int(depth * config.depth_multiplier ) , config.min_depth ) A__ = MobileNetVaConvLayer( lowercase , in_channels=config.num_channels , out_channels=lowercase , kernel_size=3 , stride=2 , ) A__ = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] A__ = nn.ModuleList() for i in range(13 ): A__ = out_channels if strides[i] == 2 or i == 0: depth *= 2 A__ = max(int(depth * config.depth_multiplier ) , config.min_depth ) self.layer.append( MobileNetVaConvLayer( lowercase , in_channels=lowercase , out_channels=lowercase , kernel_size=3 , stride=strides[i] , groups=lowercase , ) ) self.layer.append( MobileNetVaConvLayer( lowercase , in_channels=lowercase , out_channels=lowercase , kernel_size=1 , ) ) A__ = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def UpperCamelCase ( self , lowercase ) -> List[str]: '''simple docstring''' raise NotImplementedError 
@add_start_docstrings_to_model_forward(lowercase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def UpperCamelCase ( self , lowercase = None , lowercase = None , lowercase = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]: '''simple docstring''' A__ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) A__ = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values" ) A__ = self.conv_stem(lowercase ) A__ = () if output_hidden_states else None for i, layer_module in enumerate(self.layer ): A__ = layer_module(lowercase ) if output_hidden_states: A__ = all_hidden_states + (hidden_states,) A__ = hidden_states if self.pooler is not None: A__ = torch.flatten(self.pooler(lowercase ) , start_dim=1 ) else: A__ = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None ) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=lowercase , pooler_output=lowercase , hidden_states=lowercase , ) @add_start_docstrings( '\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n ' , snake_case , ) class a__ ( snake_case ): """simple docstring""" def __init__( self , lowercase ) -> None: '''simple docstring''' super().__init__(lowercase ) A__ = config.num_labels A__ = MobileNetVaModel(lowercase ) A__ = self.mobilenet_va.layer[-1].convolution.out_channels # Classifier head A__ = nn.Dropout(config.classifier_dropout_prob , inplace=lowercase ) A__ = nn.Linear(lowercase , config.num_labels ) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowercase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def UpperCamelCase ( self , lowercase = None , lowercase = None , lowercase = None , lowercase = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]: '''simple docstring''' A__ = return_dict if return_dict is not None else self.config.use_return_dict A__ = self.mobilenet_va(lowercase , output_hidden_states=lowercase , return_dict=lowercase ) A__ = outputs.pooler_output if return_dict else outputs[1] A__ = self.classifier(self.dropout(lowercase ) ) A__ = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: A__ = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): A__ = "single_label_classification" else: A__ = "multi_label_classification" if self.config.problem_type == "regression": A__ = MSELoss() if self.num_labels == 1: A__ = loss_fct(logits.squeeze() , labels.squeeze() ) else: A__ = loss_fct(lowercase , lowercase ) elif self.config.problem_type == "single_label_classification": A__ = CrossEntropyLoss() A__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": A__ = BCEWithLogitsLoss() A__ = loss_fct(lowercase , lowercase ) if not return_dict: 
A__ = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states , )
68
import io
import json
import unittest

from parameterized import parameterized

from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu


# Path to the saved src/tgt sentence pairs used as the BLEU regression fixture.
lowerCAmelCase__ = get_tests_dir() + """/test_data/fsmt/fsmt_val_data.json"""
# NOTE(review): `filename` is not defined anywhere in this module — presumably it
# was the original name of the path variable above before identifier mangling;
# as written this `with` statement raises NameError at import. TODO confirm.
with io.open(filename, """r""", encoding="""utf-8""") as f:
    lowerCAmelCase__ = json.load(f)


@require_torch
class a__(unittest.TestCase):
    """BLEU regression test for the four facebook/wmt19-* FSMT checkpoints."""

    def UpperCamelCase(self, lowercase) -> int:
        """Load the FSMT tokenizer for the given checkpoint id."""
        return FSMTTokenizer.from_pretrained(lowercase)

    def UpperCamelCase(self, lowercase) -> Optional[int]:
        """Load the FSMT model for the given checkpoint id, in fp16 on CUDA."""
        A__ = FSMTForConditionalGeneration.from_pretrained(lowercase).to(lowercase)
        if torch_device == "cuda":
            # Halve memory/latency on GPU; BLEU thresholds below were set with fp16.
            model.half()
        return model

    @parameterized.expand(
        [
            # [language pair, minimum acceptable BLEU score]
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def UpperCamelCase(self, lowercase, lowercase) -> List[Any]:
        """Generate translations with beam search and assert BLEU >= threshold."""
        A__ = F'facebook/wmt19-{pair}'
        A__ = self.get_tokenizer(lowercase)
        A__ = self.get_model(lowercase)
        # Fixture sentences for this direction, loaded at module import time.
        A__ = bleu_data[pair]["src"]
        A__ = bleu_data[pair]["tgt"]
        A__ = tokenizer(lowercase, return_tensors="pt", truncation=lowercase, padding="longest").to(lowercase)
        A__ = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        A__ = tokenizer.batch_decode(
            lowercase, skip_special_tokens=lowercase, clean_up_tokenization_spaces=lowercase
        )
        A__ = calculate_bleu(lowercase, lowercase)
        print(lowercase)
        # The thresholds are lower bounds, not exact values, so minor numeric
        # drift across library versions does not break the test.
        self.assertGreaterEqual(scores["bleu"], lowercase)
68
1
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


# Module logger, named after this module.
lowerCAmelCase__ = logging.get_logger(__name__)

# Map from BigBird checkpoint id to the URL of its hosted config.json.
lowerCAmelCase__ = {
    """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
    """google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
    """google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}


class a__(snake_case):
    """Configuration class for BigBird models (model_type 'big_bird').

    Stores hyperparameters such as vocabulary size, hidden sizes, attention
    type ('block_sparse' by default) and block-sparse attention geometry.
    """

    __lowerCamelCase = 'big_bird'

    # NOTE(review): every parameter below is named `lowercase` — duplicate
    # parameter names are a SyntaxError in Python, and the body reads names
    # (vocab_size, hidden_size, ...) that the mangled signature no longer
    # binds. This needs de-obfuscation before the module can run.
    def __init__(self, lowercase=50358, lowercase=768, lowercase=12, lowercase=12, lowercase=3072, lowercase="gelu_new", lowercase=0.1, lowercase=0.1, lowercase=4096, lowercase=2, lowercase=0.02, lowercase=1e-12, lowercase=True, lowercase=0, lowercase=1, lowercase=2, lowercase=66, lowercase="block_sparse", lowercase=True, lowercase=False, lowercase=64, lowercase=3, lowercase=None, **lowercase,) -> Optional[int]:
        """Build the config; special-token ids are forwarded to the base class."""
        super().__init__(
            pad_token_id=lowercase,
            bos_token_id=lowercase,
            eos_token_id=lowercase,
            sep_token_id=lowercase,
            **lowercase,
        )
        A__ = vocab_size
        A__ = max_position_embeddings
        A__ = hidden_size
        A__ = num_hidden_layers
        A__ = num_attention_heads
        A__ = intermediate_size
        A__ = hidden_act
        A__ = hidden_dropout_prob
        A__ = attention_probs_dropout_prob
        A__ = initializer_range
        A__ = type_vocab_size
        A__ = layer_norm_eps
        A__ = use_cache
        A__ = rescale_embeddings
        A__ = attention_type
        A__ = use_bias
        A__ = block_size
        A__ = num_random_blocks
        A__ = classifier_dropout


class a__(snake_case):
    """ONNX export configuration for BigBird: declares the dynamic input axes."""

    @property
    def UpperCamelCase(self) -> Mapping[str, Mapping[int, str]]:
        """Return input names mapped to their dynamic (symbolic) axes."""
        if self.task == "multiple-choice":
            # Multiple-choice inputs carry an extra per-example choice axis.
            A__ = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            A__ = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
68
def lowerCAmelCase__(n_element: int) -> list:
    """Return the first ``n_element`` Hamming numbers in ascending order.

    A Hamming number has the form 2^i * 3^j * 5^k (i, j, k >= 0); the series
    starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ...

    Args:
        n_element: how many Hamming numbers to produce; must be >= 1.

    Returns:
        A sorted list containing the first ``n_element`` Hamming numbers.

    Raises:
        ValueError: if ``n_element`` is smaller than 1.
    """
    n_element = int(n_element)
    if n_element < 1:
        # Raise directly instead of binding the exception to a temporary first.
        raise ValueError("a should be a positive number")
    hamming_list = [1]
    # i, j, k each index the smallest list element whose multiple by 2, 3, 5
    # (respectively) has not yet been appended.
    i, j, k = 0, 0, 0
    index = 1
    while index < n_element:
        # Advance each pointer past candidates that are already <= the tail.
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        # The next Hamming number is the smallest of the three candidates.
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list


# Readable alias: the CLI below (and external callers) refer to ``hamming``;
# previously the name was undefined and the script crashed with NameError.
hamming = lowerCAmelCase__


if __name__ == "__main__":
    n = input("""Enter the last number (nth term) of the Hamming Number Series: """)
    print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
    hamming_numbers = hamming(int(n))
    print("""-----------------------------------------------------""")
    print(f"""The list with nth numbers is: {hamming_numbers}""")
    print("""-----------------------------------------------------""")
68
1
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class a__(snake_case):
    """Processor bundling a ViT image processor and a CLIP tokenizer.

    Accepts text, images, and/or a visual prompt and produces the encodings
    the model expects; text and visual prompt are mutually exclusive.
    """

    __lowerCamelCase = ['image_processor', 'tokenizer']
    __lowerCamelCase = 'ViTImageProcessor'
    __lowerCamelCase = ('CLIPTokenizer', 'CLIPTokenizerFast')

    def __init__(self, lowercase=None, lowercase=None, **lowercase) -> List[Any]:
        """Store the image processor and tokenizer; accept the deprecated
        `feature_extractor` kwarg as a fallback for the image processor."""
        A__ = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                lowercase,
            )
            A__ = kwargs.pop("feature_extractor")
        A__ = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(lowercase, lowercase)

    def __call__(self, lowercase=None, lowercase=None, lowercase=None, lowercase=None, **lowercase) -> Any:
        """Encode text/visual prompt/images into a single encoding.

        Raises ValueError when nothing is supplied, or when both text and a
        visual prompt are supplied at once.
        """
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")
        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")
        if text is not None:
            A__ = self.tokenizer(lowercase, return_tensors=lowercase, **lowercase)
        if visual_prompt is not None:
            A__ = self.image_processor(lowercase, return_tensors=lowercase, **lowercase)
        if images is not None:
            A__ = self.image_processor(lowercase, return_tensors=lowercase, **lowercase)
        if visual_prompt is not None and images is not None:
            A__ = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            # NOTE(review): this binds pixel_values to a throwaway mangled name;
            # upstream this presumably stores them into the text encoding
            # (encoding["pixel_values"] = ...) before returning — TODO confirm
            # against the un-mangled original.
            A__ = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            A__ = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            # Images only: wrap the image-processor output in a BatchEncoding.
            return BatchEncoding(data=dict(**lowercase), tensor_type=lowercase)

    def UpperCamelCase(self, *lowercase, **lowercase) -> List[str]:
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*lowercase, **lowercase)

    def UpperCamelCase(self, *lowercase, **lowercase) -> Union[str, Any]:
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*lowercase, **lowercase)

    @property
    def UpperCamelCase(self) -> int:
        """Deprecated alias for `image_processor_class`; warns on access."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            lowercase,
        )
        return self.image_processor_class

    @property
    def UpperCamelCase(self) -> Optional[int]:
        """Deprecated alias for `image_processor`; warns on access."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            lowercase,
        )
        return self.image_processor
68
import copy
import random

from transformers import CLIPTokenizer


class a__(snake_case):
    """CLIP tokenizer extension that maps one placeholder token to a vector of
    sub-tokens ("token_1", "token_2", ...) for multi-vector textual inversion.
    """

    def __init__(self, *lowercase, **lowercase) -> Union[str, Any]:
        """Initialize the base tokenizer and an empty placeholder-token map."""
        super().__init__(*lowercase, **lowercase)
        A__ = {}

    def UpperCamelCase(self, lowercase, *lowercase, **lowercase) -> str:
        """Add a token to the vocabulary, failing loudly if it already exists."""
        A__ = super().add_tokens(lowercase, *lowercase, **lowercase)
        if num_added_tokens == 0:
            raise ValueError(
                F'The tokenizer already contains the token {placeholder_token}. Please pass a different'
                " `placeholder_token` that is not already in the tokenizer."
            )

    def UpperCamelCase(self, lowercase, *lowercase, lowercase=1, **lowercase) -> Any:
        """Register a placeholder token, expanding it into `num_vec_per_token`
        suffixed sub-tokens when more than one vector is requested."""
        A__ = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(lowercase, *lowercase, **lowercase)
            output.append(lowercase)
        else:
            A__ = []
            for i in range(lowercase):
                # Each vector slot gets its own "_<i>"-suffixed token.
                A__ = placeholder_token + F'_{i}'
                self.try_adding_tokens(lowercase, *lowercase, **lowercase)
                output.append(lowercase)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    F'The tokenizer already has placeholder token {token} that can get confused with'
                    F' {placeholder_token}keep placeholder tokens independent'
                )
        A__ = output

    def UpperCamelCase(self, lowercase, lowercase=False, lowercase=1.0) -> List[Any]:
        """Replace each registered placeholder in `text` with its sub-token
        sequence; recurses element-wise over lists of strings.

        prop_tokens_to_load trims the sub-token list proportionally, and
        vector_shuffle randomizes sub-token order on a copy.
        """
        if isinstance(lowercase, lowercase):
            A__ = []
            for i in range(len(lowercase)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=lowercase))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                A__ = self.token_map[placeholder_token]
                # Keep at least one sub-token regardless of the proportion.
                A__ = tokens[: 1 + int(len(lowercase) * prop_tokens_to_load)]
                if vector_shuffle:
                    # Shuffle a copy so the stored mapping stays ordered.
                    A__ = copy.copy(lowercase)
                    random.shuffle(lowercase)
                A__ = text.replace(lowercase, " ".join(lowercase))
        return text

    def __call__(self, lowercase, *lowercase, lowercase=False, lowercase=1.0, **lowercase) -> str:
        """Tokenize after expanding placeholder tokens in the input text."""
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                lowercase, vector_shuffle=lowercase, prop_tokens_to_load=lowercase
            ),
            *lowercase,
            **lowercase,
        )

    def UpperCamelCase(self, lowercase, *lowercase, lowercase=False, lowercase=1.0, **lowercase) -> List[str]:
        """Encode after expanding placeholder tokens in the input text."""
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                lowercase, vector_shuffle=lowercase, prop_tokens_to_load=lowercase
            ),
            *lowercase,
            **lowercase,
        )
68
1
import gc
import unittest

import torch
from parameterized import parameterized

from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


# Make all torch ops deterministic so the slice fixtures below are stable.
enable_full_determinism()


class a__(snake_case, snake_case, unittest.TestCase):
    """Unit tests for AutoencoderKL (fast, CPU-friendly 32x32 configs)."""

    __lowerCamelCase = AutoencoderKL
    __lowerCamelCase = 'sample'
    __lowerCamelCase = 1e-2

    @property
    def UpperCamelCase(self) -> Optional[Any]:
        """Random 4x3x32x32 input batch for the dummy model."""
        A__ = 4
        A__ = 3
        A__ = (32, 32)
        A__ = floats_tensor((batch_size, num_channels) + sizes).to(lowercase)
        return {"sample": image}

    @property
    def UpperCamelCase(self) -> Optional[Any]:
        """Expected input shape (C, H, W)."""
        return (3, 32, 32)

    @property
    def UpperCamelCase(self) -> Optional[Any]:
        """Expected output shape (C, H, W)."""
        return (3, 32, 32)

    def UpperCamelCase(self) -> Optional[Any]:
        """Minimal AutoencoderKL init kwargs plus matching dummy inputs."""
        A__ = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        A__ = self.dummy_input
        return init_dict, inputs_dict

    def UpperCamelCase(self) -> Tuple:
        """Intentionally disabled mixin test."""
        pass

    def UpperCamelCase(self) -> Any:
        """Intentionally disabled mixin test."""
        pass

    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def UpperCamelCase(self) -> Tuple:
        """Gradient checkpointing must not change loss or parameter grads."""
        A__ , A__ = self.prepare_init_args_and_inputs_for_common()
        A__ = self.model_class(**lowercase)
        model.to(lowercase)
        assert not model.is_gradient_checkpointing and model.training
        A__ = model(**lowercase).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()
        A__ = torch.randn_like(lowercase)
        A__ = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        A__ = self.model_class(**lowercase)
        # clone model
        model_a.load_state_dict(model.state_dict())
        model_a.to(lowercase)
        model_a.enable_gradient_checkpointing()
        assert model_a.is_gradient_checkpointing and model_a.training
        A__ = model_a(**lowercase).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_a.zero_grad()
        A__ = (out_a - labels).mean()
        loss_a.backward()
        # compare the output and parameters gradients
        self.assertTrue((loss - loss_a).abs() < 1e-5)
        A__ = dict(model.named_parameters())
        A__ = dict(model_a.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_a[name].grad.data, atol=5e-5))

    def UpperCamelCase(self) -> Optional[int]:
        """from_pretrained must load the dummy checkpoint with no missing keys."""
        A__ , A__ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=lowercase)
        self.assertIsNotNone(lowercase)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(lowercase)
        A__ = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"

    def UpperCamelCase(self) -> Any:
        """Seeded forward pass must reproduce the per-device expected slice."""
        A__ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        A__ = model.to(lowercase)
        model.eval()
        if torch_device == "mps":
            # MPS generators cannot be device-bound; fall back to the global seed.
            A__ = torch.manual_seed(0)
        else:
            A__ = torch.Generator(device=lowercase).manual_seed(0)
        A__ = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        A__ = image.to(lowercase)
        with torch.no_grad():
            A__ = model(lowercase, sample_posterior=lowercase, generator=lowercase).sample
        A__ = output[0, -1, -3:, -3:].flatten().cpu()
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            A__ = torch.tensor(
                [
                    -4.00_78e-01,
                    -3.83_23e-04,
                    -1.26_81e-01,
                    -1.14_62e-01,
                    2.00_95e-01,
                    1.08_93e-01,
                    -8.82_47e-02,
                    -3.03_61e-01,
                    -9.86_44e-03,
                ]
            )
        elif torch_device == "cpu":
            A__ = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            A__ = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )
        self.assertTrue(torch_all_close(lowercase, lowercase, rtol=1e-2))


@slow
class a__(unittest.TestCase):
    """Slow integration tests against the Stable Diffusion v1-4 VAE weights."""

    def UpperCamelCase(self, lowercase, lowercase) -> str:
        """File name of the cached noise fixture for a given seed and shape."""
        return F'gaussian_noise_s={seed}_shape={"_".join([str(lowercase ) for s in shape] )}.npy'

    def UpperCamelCase(self) -> Optional[int]:
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCamelCase(self, lowercase=0, lowercase=(4, 3, 512, 512), lowercase=False) -> Optional[int]:
        """Load a deterministic noise image fixture from the hub cache."""
        # NOTE(review): `torch.floataa` on both branches looks like mangled
        # float16/float32 names — TODO confirm against the original source.
        A__ = torch.floataa if fpaa else torch.floataa
        A__ = torch.from_numpy(load_hf_numpy(self.get_file_format(lowercase, lowercase))).to(lowercase).to(lowercase)
        return image

    def UpperCamelCase(self, lowercase="CompVis/stable-diffusion-v1-4", lowercase=False) -> Any:
        """Load the SD VAE (optionally the fp16 revision) in eval mode."""
        A__ = "fp16" if fpaa else None
        A__ = torch.floataa if fpaa else torch.floataa
        A__ = AutoencoderKL.from_pretrained(
            lowercase,
            subfolder="vae",
            torch_dtype=lowercase,
            revision=lowercase,
        )
        model.to(lowercase).eval()
        return model

    def UpperCamelCase(self, lowercase=0) -> List[str]:
        """Seeded RNG; MPS cannot host a device-bound Generator."""
        if torch_device == "mps":
            return torch.manual_seed(lowercase)
        return torch.Generator(device=lowercase).manual_seed(lowercase)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def UpperCamelCase(self, lowercase, lowercase, lowercase) -> int:
        """Stochastic encode/decode must match device-specific slice fixtures."""
        A__ = self.get_sd_vae_model()
        A__ = self.get_sd_image(lowercase)
        A__ = self.get_generator(lowercase)
        with torch.no_grad():
            A__ = model(lowercase, generator=lowercase, sample_posterior=lowercase).sample
        assert sample.shape == image.shape
        A__ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        A__ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)
        assert torch_all_close(lowercase, lowercase, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def UpperCamelCase(self, lowercase, lowercase) -> List[Any]:
        """Same as above but with the fp16 model (looser tolerance)."""
        A__ = self.get_sd_vae_model(fpaa=lowercase)
        A__ = self.get_sd_image(lowercase, fpaa=lowercase)
        A__ = self.get_generator(lowercase)
        with torch.no_grad():
            A__ = model(lowercase, generator=lowercase, sample_posterior=lowercase).sample
        assert sample.shape == image.shape
        A__ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        A__ = torch.tensor(lowercase)
        assert torch_all_close(lowercase, lowercase, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def UpperCamelCase(self, lowercase, lowercase, lowercase) -> Dict:
        """Deterministic (mode) forward pass against slice fixtures."""
        A__ = self.get_sd_vae_model()
        A__ = self.get_sd_image(lowercase)
        with torch.no_grad():
            A__ = model(lowercase).sample
        assert sample.shape == image.shape
        A__ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        A__ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)
        assert torch_all_close(lowercase, lowercase, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def UpperCamelCase(self, lowercase, lowercase) -> Tuple:
        """Decoder-only pass: 4x64x64 latents -> 3x512x512 images."""
        A__ = self.get_sd_vae_model()
        A__ = self.get_sd_image(lowercase, shape=(3, 4, 64, 64))
        with torch.no_grad():
            A__ = model.decode(lowercase).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        A__ = sample[-1, -2:, :2, -2:].flatten().cpu()
        A__ = torch.tensor(lowercase)
        assert torch_all_close(lowercase, lowercase, atol=1e-3)

    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def UpperCamelCase(self, lowercase, lowercase) -> Union[str, Any]:
        """Decoder-only pass with the fp16 model (looser tolerance)."""
        A__ = self.get_sd_vae_model(fpaa=lowercase)
        A__ = self.get_sd_image(lowercase, shape=(3, 4, 64, 64), fpaa=lowercase)
        with torch.no_grad():
            A__ = model.decode(lowercase).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        A__ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        A__ = torch.tensor(lowercase)
        assert torch_all_close(lowercase, lowercase, atol=5e-3)

    @parameterized.expand([(13,), (16,), (27,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def UpperCamelCase(self, lowercase) -> Optional[Any]:
        """xFormers attention must match the default attention (fp16 decode)."""
        A__ = self.get_sd_vae_model(fpaa=lowercase)
        A__ = self.get_sd_image(lowercase, shape=(3, 4, 64, 64), fpaa=lowercase)
        with torch.no_grad():
            A__ = model.decode(lowercase).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            A__ = model.decode(lowercase).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        assert torch_all_close(lowercase, lowercase, atol=1e-1)

    @parameterized.expand([(13,), (16,), (37,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def UpperCamelCase(self, lowercase) -> List[str]:
        """xFormers attention must match the default attention (fp32 decode)."""
        A__ = self.get_sd_vae_model()
        A__ = self.get_sd_image(lowercase, shape=(3, 4, 64, 64))
        with torch.no_grad():
            A__ = model.decode(lowercase).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            A__ = model.decode(lowercase).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        assert torch_all_close(lowercase, lowercase, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ]
    )
    def UpperCamelCase(self, lowercase, lowercase) -> str:
        """Encoder-only pass: latent sample shape and slice fixtures."""
        A__ = self.get_sd_vae_model()
        A__ = self.get_sd_image(lowercase)
        A__ = self.get_generator(lowercase)
        with torch.no_grad():
            A__ = model.encode(lowercase).latent_dist
            A__ = dist.sample(generator=lowercase)
        # Latents are spatially downsampled by a factor of 8 with 4 channels.
        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        A__ = sample[0, -1, -3:, -3:].flatten().cpu()
        A__ = torch.tensor(lowercase)
        A__ = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(lowercase, lowercase, atol=lowercase)
68
from collections import deque
from math import floor
from random import random
from time import time


# NOTE(review): this module had been machine-mangled — every method shared the
# name `UpperCamelCase`, every parameter was called `lowercase` (a duplicate
# argument SyntaxError), and assignment targets were collapsed to `A__`, so the
# file could not even be imported.  Names below are reconstructed from the
# surviving internal call sites (`self.add_pair`, `self.dfs`, `self.bfs`,
# `stack`, `visited`, `sorted_nodes`, ...).


class DirectedGraph:
    """Adjacency-list directed graph; each edge is stored as ``[weight, destination]``."""

    def __init__(self) -> None:
        # maps vertex -> list of [w, v] outgoing edges
        self.graph = {}

    def add_pair(self, u, v, w=1) -> None:
        """Add edge ``u -> v`` with optional weight ``w``; duplicate edges are ignored."""
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        # make sure the destination vertex exists even if it has no outgoing edges
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self) -> list:
        """Return every vertex in insertion order."""
        return list(self.graph)

    def remove_pair(self, u, v) -> None:
        """Remove every edge ``u -> v``; silently does nothing if absent."""
        if self.graph.get(u):
            # iterate over a snapshot so removing while iterating cannot skip entries
            for edge in list(self.graph[u]):
                if edge[1] == v:
                    self.graph[u].remove(edge)

    def dfs(self, s=-2, d=-1):
        """Iterative depth-first search from ``s`` (default: first inserted vertex).

        Returns the list of visited vertices; stops early (with ``d`` appended)
        as soon as the destination ``d`` is reached.  ``-1`` means "no
        destination".  Raises IndexError on an empty graph when ``s == -2``.
        """
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non visited child left
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # all the children are visited -> backtrack
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # back at the starting point: traversal complete
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1) -> None:
        """Populate the graph with ``c`` random vertices (random count when ``-1``)."""
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2) -> list:
        """Breadth-first traversal from ``s`` (default: first inserted vertex)."""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u) -> int:
        """Number of edges pointing at ``u`` (O(V + E) scan)."""
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u) -> int:
        """Number of edges leaving ``u``."""
        return len(self.graph[u])

    def topological_sort(self, s=-2) -> list:
        """DFS-based topological order starting at ``s``; the graph must be acyclic."""
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # descend into the first unvisited child, if any
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # every child visited: this vertex is finished, emit it
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        """Return the vertices participating in cycles reachable from the first vertex."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    # a back-edge to an ancestor: walk the stack collecting the cycle
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # all the children are visited -> backtrack
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self) -> bool:
        """True when a cycle is reachable from the first vertex."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1) -> float:
        """Wall-clock seconds one full DFS takes."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2) -> float:
        """Wall-clock seconds one full BFS takes."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin


class Graph:
    """Adjacency-list undirected graph; every edge is mirrored in both directions."""

    def __init__(self) -> None:
        self.graph = {}

    def add_pair(self, u, v, w=1) -> None:
        """Add the undirected edge ``u <-> v`` with optional weight ``w``."""
        # check if u exists
        if self.graph.get(u):
            # if there already is an edge do nothing
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            self.graph[v] = [[w, u]]

    def remove_pair(self, u, v) -> None:
        """Remove the undirected edge ``u <-> v``; missing endpoints are ignored."""
        if self.graph.get(u):
            for edge in list(self.graph[u]):
                if edge[1] == v:
                    self.graph[u].remove(edge)
        # the other way round
        if self.graph.get(v):
            for edge in list(self.graph[v]):
                if edge[1] == u:
                    self.graph[v].remove(edge)

    def dfs(self, s=-2, d=-1):
        """Iterative DFS; see :meth:`DirectedGraph.dfs` for the contract."""
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1) -> None:
        """Populate the graph with ``c`` random vertices (random count when ``-1``)."""
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2) -> list:
        """Breadth-first traversal from ``s`` (default: first inserted vertex)."""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u) -> int:
        """Number of edges incident to ``u``."""
        return len(self.graph[u])

    def cycle_nodes(self):
        """Return the vertices participating in cycles reachable from the first vertex."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self) -> bool:
        """True when a cycle is reachable from the first vertex."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            if len(stack) == 0:
                return False

    def all_nodes(self) -> list:
        """Return every vertex in insertion order."""
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1) -> float:
        """Wall-clock seconds one full DFS takes."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2) -> float:
        """Wall-clock seconds one full BFS takes."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin


# Backward-compatible alias: before the repair both classes were bound to the
# single name `a__`, so that name resolved to the (second) undirected class.
a__ = Graph
68
1
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL

# module logger (placeholder name kept for backward compatibility)
lowerCAmelCase__ = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    """Pairwise squared Euclidean distances between the rows of ``a`` and ``b``.

    Uses the identity ||a - b||^2 = ||a||^2 - 2ab + ||b||^2 so everything is a
    single matmul plus broadcasts.  Returns an ``(a.shape[0], b.shape[0])`` array.
    """
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    """Map every RGB pixel of ``x`` to the index of its nearest color cluster."""
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)


class a__(BaseImageProcessor):
    """ImageGPT-style image processor: resize, scale pixels to [-1, 1], then
    color-quantize each pixel against a fixed cluster palette, producing flat
    ``input_ids`` sequences.

    NOTE(review): this module had been mangled (duplicate parameter names,
    lost assignment targets, base class replaced by an undefined name); the
    code was reconstructed from the surviving call sites.  The class name
    placeholder `a__` is kept to preserve the module's public binding.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters=None,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_normalize=True,
        do_color_quantize=True,
        **kwargs,
    ) -> None:
        """Store defaults; ``clusters`` is the ``(n_clusters, 3)`` color palette."""
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image`` to ``size`` (must hold ``height`` and ``width`` keys)."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """Scale pixel values from [0, 255] to [-1, 1]."""
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_normalize: Optional[bool] = None,
        do_color_quantize: Optional[bool] = None,
        clusters=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> "BatchFeature":
        """Run the full pipeline and return a ``BatchFeature`` with ``input_ids``.

        Per-call arguments override the defaults stored on the instance.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        # BUG FIX: convert only when a palette was given — `np.array(None)` is
        # not None, which used to make the "Clusters must be specified" check
        # below unreachable.
        clusters = np.array(clusters) if clusters is not None else None

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        # BUG FIX: parenthesized — without parens the old `and/or` precedence
        # raised even when do_resize was False but resample was None.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
68
import datasets

from .evaluate import evaluate


# NOTE(review): the three docstring constants below had all been assigned to a
# single placeholder name; their real names are confirmed by the
# `add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)` decorator usage.
_CITATION = """\
@article{hendrycks2021cuad,
    title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
    author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
    journal={arXiv preprint arXiv:2103.06268},
    year={2021}
}
"""

_DESCRIPTION = """
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510 commercial legal
contracts that have been manually labeled to identify 41 categories of important clauses that lawyers look for when
reviewing contracts in connection with corporate transactions.
"""

_KWARGS_DESCRIPTION = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
    predictions: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair as given in the references (see below)
        - 'prediction_text': list of possible texts for the answer, as a list of strings
            depending on a threshold on the confidence probability of each prediction.
    references: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair (see above),
        - 'answers': a Dict in the CUAD dataset format
            {
                'text': list of possible texts for the answer, as a list of strings
                'answer_start': list of start positions for the answer, as a list of ints
            }
            Note that answer_start values are not taken into account to compute the metric.
Returns:
    'exact_match': Exact match (the normalized answer exactly match the gold answer)
    'f1': The F-score of predicted tokens versus the gold answer
    'aupr': Area Under the Precision-Recall curve
    'prec_at_80_recall': Precision at 80% recall
    'prec_at_90_recall': Precision at 90% recall
Examples:
    >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> cuad_metric = datasets.load_metric("cuad")
    >>> results = cuad_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class a__(datasets.Metric):
    """CUAD metric wrapper around the official scoring script.

    NOTE(review): the class-name placeholder `a__` is kept to preserve the
    module's public binding.  The hooks below must be called `_info` and
    `_compute` — that is the `datasets.Metric` API contract.
    """

    def _info(self):
        """Describe the metric: citation, input features and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        """Reshape inputs into the CUAD dataset layout and run the official scorer."""
        # map each question id to its list of candidate answers
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        # the official scorer expects the SQuAD-style nested dataset structure
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
68
1
import requests

# Replace with a real key from https://developers.giphy.com/ before use.
giphy_api_key = "YOUR API KEY"


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Return the URLs of GIFs matching *query* via the Giphy search API.

    BUG FIX: both parameters had been mangled to the same name (a
    SyntaxError) and the assignment to ``formatted_query`` had lost its
    target; the names are restored from the surviving references
    (``giphy_api_key`` default, ``get_gifs`` call in ``__main__``).
    """
    # the API expects '+'-separated search terms
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    # timeout added so a stalled network call cannot hang the caller forever
    gifs = requests.get(url, timeout=10).json()["data"]
    return [gif["url"] for gif in gifs]


if __name__ == "__main__":
    print("\n".join(get_gifs("space ship")))
68
import torch
import torch.nn as nn

from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel

from ...utils import logging


# NOTE(review): identifiers in this module look machine-mangled — the logger,
# the cosine helper, the class and its attributes all carry placeholder names,
# several functions reuse one parameter name (a SyntaxError), and assignment
# targets were collapsed to `A__`.  Code is kept byte-identical below; only
# documentation was added.

# module logger
lowerCAmelCase__ = logging.get_logger(__name__)


def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: int ) -> int:
    """Cosine similarity between two embedding matrices.

    Both inputs are L2-normalized, so the matrix product of the first with the
    transpose of the second yields per-row-pair cosine similarities.
    NOTE(review): the body references ``normalized_text_embeds``, which is
    never assigned here — the original assignment targets were lost in the
    renaming; confirm against the upstream helper.
    """
    A__ = nn.functional.normalize(SCREAMING_SNAKE_CASE_ )
    A__ = nn.functional.normalize(SCREAMING_SNAKE_CASE_ )
    return torch.mm(SCREAMING_SNAKE_CASE_ , normalized_text_embeds.t() )


class a__ ( snake_case ):
    """CLIP-based safety checker flagging NSFW concepts in image embeddings.

    NOTE(review): the base class ``snake_case`` is undefined in this file —
    presumably it was ``PreTrainedModel`` (imported above); confirm upstream.
    """

    # presumably config_class and _no_split_modules — both assignments share one
    # placeholder name, so only the second survives; TODO confirm upstream
    __lowerCamelCase = CLIPConfig
    __lowerCamelCase = ['CLIPEncoderLayer']

    def __init__( self , lowercase ) -> Optional[int]:
        """Build the CLIP vision tower, projection layer, and the learned
        concept/special-care embeddings and their per-concept thresholds.

        NOTE(review): the body reads ``config`` although the parameter is
        named ``lowercase``; targets of the ``A__`` assignments were lost.
        """
        super().__init__(lowercase )
        A__ = CLIPVisionModel(config.vision_config )
        A__ = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=lowercase )
        # 17 concept embeddings + 3 "special care" embeddings, with thresholds
        A__ = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=lowercase )
        A__ = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=lowercase )
        A__ = nn.Parameter(torch.ones(17 ) , requires_grad=lowercase )
        A__ = nn.Parameter(torch.ones(3 ) , requires_grad=lowercase )

    @torch.no_grad()
    def UpperCamelCase ( self , lowercase , lowercase ) -> Any:
        """Per-image scoring path: builds a per-image dict of special/concept
        scores in a Python loop and returns (images, has_nsfw_concepts).

        NOTE(review): body references ``image_embeds``, ``special_cos_dist``,
        ``cos_dist``, ``result``, ``result_img``, ``adjustment`` etc. whose
        assignments all collapsed to ``A__`` — confirm against upstream.
        """
        A__ = self.vision_model(lowercase )[1]  # pooled_output
        A__ = self.visual_projection(lowercase )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        A__ = cosine_distance(lowercase , self.special_care_embeds ).cpu().float().numpy()
        A__ = cosine_distance(lowercase , self.concept_embeds ).cpu().float().numpy()
        A__ = []
        A__ = image_embeds.shape[0]
        for i in range(lowercase ):
            A__ = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            A__ = 0.0
            for concept_idx in range(len(special_cos_dist[0] ) ):
                A__ = special_cos_dist[i][concept_idx]
                A__ = self.special_care_embeds_weights[concept_idx].item()
                A__ = round(concept_cos - concept_threshold + adjustment , 3 )
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]} )
                    A__ = 0.01
            for concept_idx in range(len(cos_dist[0] ) ):
                A__ = cos_dist[i][concept_idx]
                A__ = self.concept_embeds_weights[concept_idx].item()
                A__ = round(concept_cos - concept_threshold + adjustment , 3 )
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(lowercase )
            result.append(lowercase )
        A__ = [len(res["bad_concepts"] ) > 0 for res in result]
        return images, has_nsfw_concepts

    @torch.no_grad()
    def UpperCamelCase ( self , lowercase , lowercase ) -> Any:
        """Vectorized (ONNX-friendly) scoring path: computes the same NSFW
        decision entirely with tensor ops and returns (images, has_nsfw_concepts).
        """
        A__ = self.vision_model(lowercase )[1]  # pooled_output
        A__ = self.visual_projection(lowercase )
        A__ = cosine_distance(lowercase , self.special_care_embeds )
        A__ = cosine_distance(lowercase , self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        A__ = 0.0
        A__ = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        A__ = torch.any(special_scores > 0 , dim=1 )
        A__ = special_care * 0.01
        A__ = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
        A__ = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        A__ = torch.any(concept_scores > 0 , dim=1 )
        return images, has_nsfw_concepts
68
1
import warnings
from functools import wraps
from typing import Callable


def lowerCAmelCase__(fn: Callable) -> Callable:
    """Mark *fn* as experimental: the returned wrapper emits a ``UserWarning``
    on every call and then delegates to *fn* unchanged.

    BUG FIX: the wrapper's ``*args``/``**kwargs`` had been mangled to one
    shared name (a SyntaxError), and the second positional argument of
    ``warnings.warn`` — which must be a ``Warning`` subclass — was the wrapped
    callable itself, raising ``TypeError`` at call time.
    """

    @wraps(fn)  # preserve fn's __name__/__doc__ on the wrapper
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            (f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."),
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
68
from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, 
wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
68
1
import unittest
from dataclasses import dataclass

import pytest

from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict


@dataclass
class MockLaunchConfig(SageMakerConfig):
    """Static SageMaker launch configuration driving the conversion test below.

    BUG FIX: this class's name had been mangled to the same placeholder as the
    test class (shadowing it); the name ``MockLaunchConfig`` is confirmed by
    the references inside the test method.
    NOTE(review): the field names were lost in the mangling and are
    reconstructed from accelerate's ``SageMakerConfig`` schema — confirm.
    """

    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    # well-formed nargs-style CLI arguments
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    # mixes bare store_true-style flags with an explicit value -> must be rejected
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]


class a__(unittest.TestCase):
    """Tests for converting nargs-style CLI arguments into a typed dict.

    The placeholder class name is kept (it was the module's visible binding);
    the method is renamed to ``test_*`` so unittest discovery actually runs it.
    """

    def test_args_convert(self):
        # success path: each value is parsed into its natural Python type
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        # malformed flag/value mixture is rejected
        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
68
import string


def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher: print the decryption of *message* under
    every possible key, one line per key.

    Only uppercase A-Z characters are shifted; every other character is
    copied through unchanged.

    BUG FIX: both functions in this module had been mangled to one shared
    placeholder name while their call sites (`decrypt(...)`, `main()`) kept
    the real names, so the script raised NameError; the names are restored.
    """
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                # wrap around the alphabet for negative shifts
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    """Read a message from stdin, uppercase it, and print all candidate decryptions."""
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
68
1
import unittest from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class a__ ( snake_case , unittest.TestCase ): """simple docstring""" __lowerCamelCase = ReformerTokenizer __lowerCamelCase = ReformerTokenizerFast __lowerCamelCase = True __lowerCamelCase = False __lowerCamelCase = True def UpperCamelCase ( self ) -> int: '''simple docstring''' super().setUp() A__ = ReformerTokenizer(lowercase , keep_accents=lowercase ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ = "<s>" A__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase ) , lowercase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase ) , lowercase ) def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<unk>" ) self.assertEqual(vocab_keys[1] , "<s>" ) self.assertEqual(vocab_keys[-1] , "j" ) self.assertEqual(len(lowercase ) , 1000 ) def UpperCamelCase ( self ) -> int: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1000 ) def UpperCamelCase ( self ) -> Any: '''simple docstring''' if not self.test_rust_tokenizer: return A__ = self.get_tokenizer() A__ = self.get_rust_tokenizer() A__ = "I was born in 92000, and this is falsé." 
A__ = tokenizer.tokenize(lowercase ) A__ = rust_tokenizer.tokenize(lowercase ) self.assertListEqual(lowercase , lowercase ) A__ = tokenizer.encode(lowercase , add_special_tokens=lowercase ) A__ = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase ) self.assertListEqual(lowercase , lowercase ) A__ = self.get_rust_tokenizer() A__ = tokenizer.encode(lowercase ) A__ = rust_tokenizer.encode(lowercase ) self.assertListEqual(lowercase , lowercase ) def UpperCamelCase ( self , lowercase=15 ) -> Any: '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): A__ = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase ) # Simple input A__ = "This is a simple input" A__ = ["This is a simple input 1", "This is a simple input 2"] A__ = ("This is a simple input", "This is a pair") A__ = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests self.assertRaises(lowercase , tokenizer_r.encode , lowercase , max_length=lowercase , padding="max_length" ) # Simple input self.assertRaises(lowercase , tokenizer_r.encode_plus , lowercase , max_length=lowercase , padding="max_length" ) # Simple input self.assertRaises( lowercase , tokenizer_r.batch_encode_plus , lowercase , max_length=lowercase , padding="max_length" , ) # Pair input self.assertRaises(lowercase , tokenizer_r.encode , lowercase , max_length=lowercase , padding="max_length" ) # Pair input self.assertRaises(lowercase , tokenizer_r.encode_plus , lowercase , max_length=lowercase , padding="max_length" ) # Pair input self.assertRaises( lowercase , tokenizer_r.batch_encode_plus , lowercase , max_length=lowercase , padding="max_length" , ) def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' pass def UpperCamelCase ( self ) -> Dict: '''simple docstring''' A__ = ReformerTokenizer(lowercase , keep_accents=lowercase ) 
A__ = tokenizer.tokenize("This is a test" ) self.assertListEqual(lowercase , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowercase ) , [285, 46, 10, 170, 382] , ) A__ = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( lowercase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) A__ = tokenizer.convert_tokens_to_ids(lowercase ) self.assertListEqual( lowercase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) A__ = tokenizer.convert_ids_to_tokens(lowercase ) self.assertListEqual( lowercase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) @cached_property def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" ) @slow def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' A__ = "Hello World!" A__ = [126, 32, 262, 152, 38, 72, 287] self.assertListEqual(lowercase , self.big_tokenizer.encode(lowercase ) ) @slow def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' A__ = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will" " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth" ) A__ = [ 108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4, 35, 110, 44, 8, 259, 91, 268, 21, 11, 209, 274, 109, 266, 277, 117, 86, 93, 315, 258, 278, 258, 277, 258, 0, 258, 288, 258, 319, 258, 0, 258, 0, 258, 0, 258, 0, 258, 287, 258, 315, 258, 289, 258, 278, 99, 269, 266, 262, 8, 259, 241, 4, 217, 230, 268, 266, 55, 168, 106, 75, 193, 266, 223, 27, 49, 26, 282, 25, 264, 299, 19, 26, 0, 258, 277, 117, 86, 93, 176, 183, 270, 11, 262, 42, 61, 265, ] self.assertListEqual(lowercase , self.big_tokenizer.encode(lowercase ) ) @require_torch @slow def UpperCamelCase ( self ) -> Optional[int]: '''simple docstring''' import torch from transformers import ReformerConfig, ReformerModel # Build sequence A__ = list(self.big_tokenizer.get_vocab().keys() )[:10] A__ = " ".join(lowercase ) A__ = self.big_tokenizer.encode_plus(lowercase , return_tensors="pt" ) A__ = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="pt" ) A__ = ReformerConfig() # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024) A__ = encoded_sequence["input_ids"].shape A__ = ReformerModel(lowercase ) # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**lowercase ) model(**lowercase ) @slow def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' A__ = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # This tokenizer does not know some characters like ")". # That is the reason why we use very simple texts here. # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064 A__ = [ "This is a very simple sentence.", "The quick brown fox jumps over the lazy dog.", ] self.tokenizer_integration_test_util( expected_encoding=lowercase , model_name="google/reformer-crime-and-punishment" , revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a" , padding=lowercase , sequences=lowercase , )
68
import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ = get_tests_dir("""fixtures/test_sentencepiece_bpe_char.model""") @require_sentencepiece @require_tokenizers class a__ ( snake_case , unittest.TestCase ): """simple docstring""" __lowerCamelCase = SpeechTaTokenizer __lowerCamelCase = False __lowerCamelCase = True def UpperCamelCase ( self ) -> Any: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing A__ = SpeechTaTokenizer(lowercase ) A__ = AddedToken("<mask>" , lstrip=lowercase , rstrip=lowercase ) A__ = mask_token tokenizer.add_special_tokens({"mask_token": mask_token} ) tokenizer.add_tokens(["<ctc_blank>"] ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase ( self , lowercase ) -> Union[str, Any]: '''simple docstring''' A__ = "this is a test" A__ = "this is a test" return input_text, output_text def UpperCamelCase ( self , lowercase , lowercase=False , lowercase=20 , lowercase=5 ) -> Optional[Any]: '''simple docstring''' A__ , A__ = self.get_input_output_texts(lowercase ) A__ = tokenizer.encode(lowercase , add_special_tokens=lowercase ) A__ = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase ) return text, ids def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' A__ = "<pad>" A__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase ) , lowercase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase ) , lowercase ) def UpperCamelCase ( self ) -> List[str]: '''simple docstring''' A__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-4] , "œ" 
) self.assertEqual(vocab_keys[-2] , "<mask>" ) self.assertEqual(vocab_keys[-1] , "<ctc_blank>" ) self.assertEqual(len(lowercase ) , 81 ) def UpperCamelCase ( self ) -> Dict: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def UpperCamelCase ( self ) -> Optional[int]: '''simple docstring''' A__ = self.get_tokenizers(do_lower_case=lowercase ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): A__ = tokenizer.vocab_size A__ = len(lowercase ) self.assertNotEqual(lowercase , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) A__ = ["aaaaa bbbbbb", "cccccccccdddddddd"] A__ = tokenizer.add_tokens(lowercase ) A__ = tokenizer.vocab_size A__ = len(lowercase ) self.assertNotEqual(lowercase , 0 ) self.assertEqual(lowercase , lowercase ) self.assertEqual(lowercase , len(lowercase ) ) self.assertEqual(lowercase , all_size + len(lowercase ) ) A__ = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=lowercase ) self.assertGreaterEqual(len(lowercase ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) A__ = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"} A__ = tokenizer.add_special_tokens(lowercase ) A__ = tokenizer.vocab_size A__ = len(lowercase ) self.assertNotEqual(lowercase , 0 ) self.assertEqual(lowercase , lowercase ) self.assertEqual(lowercase , len(lowercase ) ) self.assertEqual(lowercase , all_size_a + len(lowercase ) ) A__ = tokenizer.encode( ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=lowercase ) self.assertGreaterEqual(len(lowercase ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) 
self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' pass def UpperCamelCase ( self ) -> Any: '''simple docstring''' pass def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' A__ = self.get_tokenizer() A__ = tokenizer.tokenize("This is a test" ) # fmt: off self.assertListEqual(lowercase , [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(lowercase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) A__ = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( lowercase , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] ) A__ = tokenizer.convert_tokens_to_ids(lowercase ) # fmt: off self.assertListEqual(lowercase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on A__ = tokenizer.convert_ids_to_tokens(lowercase ) self.assertListEqual( lowercase , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] ) @slow def UpperCamelCase ( self ) -> int: '''simple docstring''' A__ = [ "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides " "general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, 
XLNet...) for Natural " "Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained " "models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.", "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly " "conditioning on both left and right context in all layers.", "The quick brown fox jumps over the lazy dog.", ] # fmt: off A__ = { "input_ids": [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 
15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], "attention_mask": [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase , model_name="microsoft/speecht5_asr" , revision="c5ef64c71905caeccde0e4462ef3f9077224c524" , sequences=lowercase , )
68
1
# Capacity matrix of the example flow network; entry [u][v] is the capacity of
# the directed edge u -> v.
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    """Breadth-first search over the residual graph.

    Fills ``parent`` with the BFS tree and returns True iff sink ``t`` is
    reachable from source ``s`` through edges with residual capacity > 0.
    """
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    """Return the edges of a minimum s-t cut (Ford-Fulkerson with BFS).

    Repeatedly augments along BFS paths until the sink is unreachable, then
    reports every edge whose residual capacity dropped to 0 while its original
    capacity was positive. NOTE: ``graph`` is mutated into its residual form.
    """
    parent = [-1] * len(graph)
    res = []
    temp = [row[:] for row in graph]  # keep the original capacities for the final scan

    while bfs(graph, source, sink, parent):
        # Bottleneck capacity along the augmenting path found by BFS.
        path_flow = float("Inf")
        s = sink
        while s != source:
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        # Update residual capacities along the path (forward and reverse edges).
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
68
# Usage:
# ./gen-card-facebook-wmt19.py

import os
from pathlib import Path


def write_model_card(model_card_dir, src_lang, tgt_lang):
    """Generate and write the README.md model card for one FSMT WMT19 pair.

    Args:
        model_card_dir: directory the README.md is written into (created if missing).
        src_lang: source language code, e.g. "en".
        tgt_lang: target language code, e.g. "ru".
    """
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f'{src_lang}-{tgt_lang}'

    readme = f'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'

    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f'Generating {path}')
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


if __name__ == "__main__":
    # Guarded so importing this module does not write files into the repo.
    # make sure we are under the root of the project
    repo_dir = Path(__file__).resolve().parent.parent.parent
    model_cards_dir = repo_dir / "model_cards"

    for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
        base, src_lang, tgt_lang = model_name.split("-")
        model_card_dir = model_cards_dir / "facebook" / model_name
        write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
68
1
import argparse

import torch
from torch import nn

from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration


def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping entries that have no transformers counterpart (in place)."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_keys(s_dict):
    """Rename fairseq parameter names to the transformers naming scheme (in place).

    NOTE(review): the replacement targets ("layers", "conv") were reconstructed
    from the transformers conversion script — confirm against the model's
    expected state-dict keys.
    """
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    """Build a bias-free Linear layer sharing its weight tensor with `emb`."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_sat_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Convert a fairseq Speech2Text checkpoint (.pt) into a transformers model directory.

    Args:
        checkpoint_path: path to the fairseq ``.pt`` checkpoint.
        pytorch_dump_folder_path: output directory for ``save_pretrained``.
    """
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    args = mam_aaa["args"]
    state_dict = mam_aaa["model"]
    # Kept aside before it is stripped by remove_ignore_keys_, so it can seed lm_head.
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]

    config = SpeechaTextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = SpeechaTextForConditionalGeneration(config)
    # Positional-embedding weights are recomputed, so they may legitimately be missing.
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f' but all the following weights are missing {missing}'
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    cli_args = parser.parse_args()
    convert_fairseq_sat_checkpoint_to_tfms(cli_args.fairseq_path, cli_args.pytorch_dump_folder_path)
68
# Generic padding/truncation machinery for speech feature extractors
# (mirrors transformers' SequenceFeatureExtractor).
# NOTE(review): class/method names are machine-mangled (`a__`, repeated
# `UpperCamelCase`, all params `lowercase`, locals `A__`); many names read
# below (`feature_size`, `processed_features`, `required_input`, `max_length`,
# `padding`, ...) are never bound under those names in this mangled form.
from typing import Dict, List, Optional, Union

import numpy as np

from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy


lowerCAmelCase__ = logging.get_logger(__name__)


class a__ ( snake_case ):
    """Base feature extractor that pads/truncates batches of sequential features."""

    def __init__( self , lowercase , lowercase , lowercase , **lowercase ) -> Union[str, Any]:
        """Store feature size, sampling rate, padding value and padding side."""
        A__ = feature_size
        A__ = sampling_rate
        A__ = padding_value

        # "right" is the default padding side unless the caller overrides it.
        A__ = kwargs.pop("padding_side" , "right" )
        A__ = kwargs.pop("return_attention_mask" , lowercase )

        super().__init__(**lowercase )

    def UpperCamelCase ( self , lowercase , lowercase = True , lowercase = None , lowercase = False , lowercase = None , lowercase = None , lowercase = None , ) -> BatchFeature:
        """Pad (and optionally truncate) a batch of features to a common length
        and return them as a BatchFeature, optionally as framework tensors."""
        # A list of dict-like examples is transposed into a dict of lists.
        if isinstance(lowercase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
            A__ = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                F' to this method that includes {self.model_input_names[0]}, but you provided'
                F' {list(processed_features.keys() )}' )

        A__ = processed_features[self.model_input_names[0]]
        A__ = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        # Empty batch: nothing to pad.
        if len(lowercase ) == 0:
            if return_attention_mask:
                A__ = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch

        A__ = required_input[0]
        if isinstance(lowercase , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            A__ = 0
            while len(required_input[index] ) == 0:
                index += 1
            if index < len(lowercase ):
                A__ = required_input[index][0]

        # Infer the target tensor framework from the first element when the
        # caller did not request one explicitly.
        if return_tensors is None:
            if is_tf_tensor(lowercase ):
                A__ = "tf"
            elif is_torch_tensor(lowercase ):
                A__ = "pt"
            elif isinstance(lowercase , (int, float, list, tuple, np.ndarray) ):
                A__ = "np"
            else:
                raise ValueError(
                    F'type of {first_element} unknown: {type(lowercase )}. '
                    "Should be one of a python, numpy, pytorch or tensorflow object." )

        for key, value in processed_features.items():
            if isinstance(value[0] , (int, float) ):
                A__ = to_numpy(lowercase )
            else:
                A__ = [to_numpy(lowercase ) for v in value]

        # Convert padding_strategy in PaddingStrategy
        A__ = self._get_padding_strategies(padding=lowercase , max_length=lowercase )

        A__ = processed_features[self.model_input_names[0]]
        A__ = len(lowercase )
        if not all(len(lowercase ) == batch_size for v in processed_features.values() ):
            raise ValueError("Some items in the output dictionary have a different batch size than others." )

        # Truncate each example individually before padding.
        A__ = []
        for i in range(lowercase ):
            A__ = {k: v[i] for k, v in processed_features.items()}
            # truncation
            A__ = self._truncate(
                lowercase , max_length=lowercase , pad_to_multiple_of=lowercase , truncation=lowercase , )
            truncated_inputs.append(lowercase )

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            A__ = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
            A__ = PaddingStrategy.MAX_LENGTH

        A__ = {}
        for i in range(lowercase ):
            # padding
            A__ = self._pad(
                truncated_inputs[i] , max_length=lowercase , padding_strategy=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    A__ = []
                # NOTE(review): the dtype tokens below read `np.floataa` — this
                # looks like a mangling of float64 -> float32 downcasting; confirm.
                if value.dtype is np.dtype(np.floataa ):
                    A__ = value.astype(np.floataa )
                batch_outputs[key].append(lowercase )

        return BatchFeature(lowercase , tensor_type=lowercase )

    def UpperCamelCase ( self , lowercase , lowercase = None , lowercase = PaddingStrategy.DO_NOT_PAD , lowercase = None , lowercase = None , ) -> dict:
        """Pad one example (dict of arrays) up to `max_length` on the configured side."""
        A__ = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            A__ = len(lowercase )

        # Round max_length up to the next multiple when requested.
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            A__ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        A__ = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowercase ) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            A__ = np.ones(len(lowercase ) , dtype=np.intaa )

        if needs_to_be_padded:
            A__ = max_length - len(lowercase )

            if self.padding_side == "right":
                if return_attention_mask:
                    A__ = np.pad(
                        processed_features["attention_mask"] , (0, difference) )
                # Pad only the time axis; keep the feature axis untouched.
                A__ = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                A__ = np.pad(
                    lowercase , lowercase , "constant" , constant_values=self.padding_value )
            elif self.padding_side == "left":
                if return_attention_mask:
                    A__ = np.pad(
                        processed_features["attention_mask"] , (difference, 0) )
                A__ = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                A__ = np.pad(
                    lowercase , lowercase , "constant" , constant_values=self.padding_value )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )

        return processed_features

    def UpperCamelCase ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , ) -> Union[str, Any]:
        """Truncate one example down to `max_length`; requires max_length when truncating."""
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )

        A__ = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            A__ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        A__ = len(lowercase ) > max_length

        if needs_to_be_truncated:
            A__ = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                A__ = processed_features["attention_mask"][:max_length]

        return processed_features

    def UpperCamelCase ( self , lowercase=False , lowercase=None ) -> Any:
        """Normalize the user-facing `padding` argument into a PaddingStrategy."""
        if padding is not False:
            if padding is True:
                A__ = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(lowercase , lowercase ):
                A__ = PaddingStrategy(lowercase )
            elif isinstance(lowercase , lowercase ):
                A__ = padding
        else:
            A__ = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    F'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined' )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )

        return padding_strategy
68
1
# `transformers env` CLI command: collects installed framework versions and
# prints a report for bug-filing.
# NOTE(review): identifiers are machine-mangled — both factory functions share
# the name `lowerCAmelCase__`, methods share `UpperCamelCase`, and many names
# read below (`download_parser`, `accelerate_config`, `info`, `d`, ...) are
# never bound under those names here.
import importlib.util
import os
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import (
    is_accelerate_available,
    is_flax_available,
    is_safetensors_available,
    is_tf_available,
    is_torch_available,
)
from . import BaseTransformersCLICommand


def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] ) -> List[Any]:
    """argparse factory: build an EnvironmentCommand with no config file."""
    return EnvironmentCommand()


def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[str, Any] ) -> Tuple:
    """argparse factory: build an EnvironmentCommand from parsed CLI args."""
    return EnvironmentCommand(args.accelerate_config_file )


class a__ ( snake_case ):
    """CLI command that prints environment/version info for GitHub issues."""

    @staticmethod
    def UpperCamelCase ( lowercase ) -> List[Any]:
        """Register the `env` sub-command on the given argparse sub-parser."""
        A__ = parser.add_parser("env" )
        download_parser.set_defaults(func=lowercase )
        download_parser.add_argument(
            "--accelerate-config_file" , default=lowercase , help="The accelerate config file to use for the default values in the launching script." , )
        download_parser.set_defaults(func=lowercase )

    def __init__( self , lowercase , *lowercase ) -> None:
        """Remember the (optional) accelerate config file path."""
        A__ = accelerate_config_file

    def UpperCamelCase ( self ) -> int:
        """Gather version info for every optionally-installed framework,
        print the report and return the info dict."""
        # safetensors: report the version, or flag it as ignored when only
        # importable but unusable.
        A__ = "not installed"
        if is_safetensors_available():
            import safetensors

            A__ = safetensors.__version__
        elif importlib.util.find_spec("safetensors" ) is not None:
            import safetensors

            A__ = F'{safetensors.__version__} but is ignored because of PyTorch version too old.'

        A__ = "not installed"
        A__ = A__ = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            A__ = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(lowercase ):
                A__ = load_config_from_file(self._accelerate_config_file ).to_dict()

            A__ = (
                "\n".join([F'\t- {prop}: {val}' for prop, val in accelerate_config.items()] )
                if isinstance(lowercase , lowercase )
                else F'\t{accelerate_config}'
            )

        A__ = "not installed"
        A__ = "NA"
        if is_torch_available():
            import torch

            A__ = torch.__version__
            A__ = torch.cuda.is_available()

        A__ = "not installed"
        A__ = "NA"
        if is_tf_available():
            import tensorflow as tf

            A__ = tf.__version__
            try:
                # deprecated in v2.1
                A__ = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                A__ = bool(tf.config.list_physical_devices("GPU" ) )

        A__ = "not installed"
        A__ = "not installed"
        A__ = "not installed"
        A__ = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            A__ = flax.__version__
            A__ = jax.__version__
            A__ = jaxlib.__version__
            A__ = jax.lib.xla_bridge.get_backend().platform

        A__ = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": F'{safetensors_version}',
            "Accelerate version": F'{accelerate_version}',
            "Accelerate config": F'{accelerate_config_str}',
            "PyTorch version (GPU?)": F'{pt_version} ({pt_cuda_available})',
            "Tensorflow version (GPU?)": F'{tf_version} ({tf_cuda_available})',
            "Flax version (CPU?/GPU?/TPU?)": F'{flax_version} ({jax_backend})',
            "Jax version": F'{jax_version}',
            "JaxLib version": F'{jaxlib_version}',
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
        print(self.format_dict(lowercase ) )

        return info

    @staticmethod
    def UpperCamelCase ( lowercase ) -> Optional[int]:
        """Render an info dict as `- key: value` lines."""
        return "\n".join([F'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
68
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowerCAmelCase__ = { """configuration_groupvit""": [ """GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GroupViTConfig""", """GroupViTOnnxConfig""", """GroupViTTextConfig""", """GroupViTVisionConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ """GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """GroupViTModel""", """GroupViTPreTrainedModel""", """GroupViTTextModel""", """GroupViTVisionModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ """TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFGroupViTModel""", """TFGroupViTPreTrainedModel""", """TFGroupViTTextModel""", """TFGroupViTVisionModel""", ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
68
1
from itertools import count def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int = 5_0 ) -> int: '''simple docstring''' A__ = [1] * min_block_length for n in count(SCREAMING_SNAKE_CASE_ ): fill_count_functions.append(1 ) for block_length in range(SCREAMING_SNAKE_CASE_ , n + 1 ): for block_start in range(n - block_length ): fill_count_functions[n] += fill_count_functions[ n - block_start - block_length - 1 ] fill_count_functions[n] += 1 if fill_count_functions[n] > 1_0_0_0_0_0_0: break return n if __name__ == "__main__": print(f"""{solution() = }""")
68
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { """abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""", } class a__ ( snake_case ): """simple docstring""" __lowerCamelCase = 'gpt_neox_japanese' def __init__( self , lowercase=32000 , lowercase=2560 , lowercase=32 , lowercase=32 , lowercase=4 , lowercase="gelu" , lowercase=1.00 , lowercase=10000 , lowercase=2048 , lowercase=0.02 , lowercase=1e-5 , lowercase=True , lowercase=31996 , lowercase=31999 , lowercase=0.1 , lowercase=0.0 , **lowercase , ) -> Dict: '''simple docstring''' super().__init__(bos_token_id=lowercase , eos_token_id=lowercase , **lowercase ) A__ = vocab_size A__ = max_position_embeddings A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_multiple_size A__ = hidden_act A__ = rotary_pct A__ = rotary_emb_base A__ = initializer_range A__ = layer_norm_eps A__ = use_cache A__ = attention_dropout A__ = hidden_dropout
68
1
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers lowerCAmelCase__ = """3""" print("""Python version:""", sys.version) print("""transformers version:""", transformers.__version__) try: import torch print("""Torch version:""", torch.__version__) print("""Cuda available:""", torch.cuda.is_available()) print("""Cuda version:""", torch.version.cuda) print("""CuDNN version:""", torch.backends.cudnn.version()) print("""Number of GPUs available:""", torch.cuda.device_count()) print("""NCCL version:""", torch.cuda.nccl.version()) except ImportError: print("""Torch version:""", None) try: import deepspeed print("""DeepSpeed version:""", deepspeed.__version__) except ImportError: print("""DeepSpeed version:""", None) try: import tensorflow as tf print("""TensorFlow version:""", tf.__version__) print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU"""))) print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU"""))) except ImportError: print("""TensorFlow version:""", None)
68
import warnings from functools import wraps from typing import Callable def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Callable ) -> Callable: '''simple docstring''' @wraps(SCREAMING_SNAKE_CASE_ ) def _inner_fn(*SCREAMING_SNAKE_CASE_: int , **SCREAMING_SNAKE_CASE_: Union[str, Any] ): warnings.warn( (F'\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.') , SCREAMING_SNAKE_CASE_ , ) return fn(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) return _inner_fn
68
1
# CANINE tokenizer: operates directly on Unicode codepoints (token id == ord).
# NOTE(review): names are machine-mangled — the constants below are all bound
# to `lowerCAmelCase__` (shadowing each other), yet the class reads
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`, `CLS`/`SEP`/`BOS`/`MASK`/`PAD`/
# `RESERVED`, `SPECIAL_CODEPOINTS` and `UNICODE_VOCAB_SIZE`, which are never
# bound under those names here; likewise the __init__ defaults
# `lowercase=chr(lowercase )` are self-referential and would raise NameError
# at import time.  Confirm against the unmangled transformers source.
from typing import Dict, List, Optional

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


lowerCAmelCase__ = logging.get_logger(__name__)

# Model max lengths for known checkpoints.
lowerCAmelCase__ = {
    """nielsr/canine-s""": 2_0_4_8,
}

# Unicode defines 1,114,112 total “codepoints”
lowerCAmelCase__ = 1_1_1_4_1_1_2

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
lowerCAmelCase__ = 0

lowerCAmelCase__ = 0xe0_00
lowerCAmelCase__ = 0xe0_01
lowerCAmelCase__ = 0xe0_02
lowerCAmelCase__ = 0xe0_03
lowerCAmelCase__ = 0xe0_04

# Maps special codepoints to human-readable names.
lowerCAmelCase__ = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
lowerCAmelCase__ = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}


class a__ ( snake_case ):
    """Character-level tokenizer: every Unicode codepoint is its own token."""

    __lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__( self , lowercase=chr(lowercase ) , lowercase=chr(lowercase ) , lowercase=chr(lowercase ) , lowercase=chr(lowercase ) , lowercase=chr(lowercase ) , lowercase=chr(lowercase ) , lowercase=False , lowercase=2048 , **lowercase , ) -> List[Any]:
        """Wrap special-token strings in AddedToken and build the codepoint maps."""
        A__ = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else bos_token
        A__ = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else eos_token
        A__ = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else sep_token
        A__ = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else cls_token
        A__ = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        A__ = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else mask_token

        super().__init__(
            bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , cls_token=lowercase , pad_token=lowercase , mask_token=lowercase , add_prefix_space=lowercase , model_max_length=lowercase , **lowercase , )

        # Creates a mapping for looking up the IDs of special symbols.
        A__ = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            A__ = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        A__ = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        A__ = UNICODE_VOCAB_SIZE
        A__ = len(self._special_codepoints )

    @property
    def UpperCamelCase ( self ) -> int:
        """Vocabulary size = total number of Unicode codepoints."""
        return self._unicode_vocab_size

    def UpperCamelCase ( self , lowercase ) -> List[str]:
        """Tokenize a string into its individual characters."""
        return list(lowercase )

    def UpperCamelCase ( self , lowercase ) -> int:
        """Convert a single-character token to its codepoint id."""
        try:
            return ord(lowercase )
        except TypeError:
            raise ValueError(F'invalid token: \'{token}\'' )

    def UpperCamelCase ( self , lowercase ) -> str:
        """Convert a codepoint id back to its character (or special-token name)."""
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(lowercase )
        except TypeError:
            raise ValueError(F'invalid id: {index}' )

    def UpperCamelCase ( self , lowercase ) -> Dict:
        """Join character tokens back into a string."""
        return "".join(lowercase )

    def UpperCamelCase ( self , lowercase , lowercase = None ) -> List[int]:
        """Build model inputs: [CLS] A [SEP] (+ B [SEP] for pairs)."""
        A__ = [self.sep_token_id]
        A__ = [self.cls_token_id]

        A__ = cls + token_ids_a + sep
        if token_ids_a is not None:
            result += token_ids_a + sep
        return result

    def UpperCamelCase ( self , lowercase , lowercase = None , lowercase = False ) -> List[int]:
        """Return a mask marking special tokens (1) vs. sequence tokens (0)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowercase , token_ids_a=lowercase , already_has_special_tokens=lowercase )

        A__ = [1] + ([0] * len(lowercase )) + [1]
        if token_ids_a is not None:
            result += ([0] * len(lowercase )) + [1]
        return result

    def UpperCamelCase ( self , lowercase , lowercase = None ) -> List[int]:
        """Return segment ids: 0 for the first sequence (+specials), 1 for the second."""
        A__ = [self.sep_token_id]
        A__ = [self.cls_token_id]

        A__ = len(cls + token_ids_a + sep ) * [0]
        if token_ids_a is not None:
            result += len(token_ids_a + sep ) * [1]
        return result

    def UpperCamelCase ( self , lowercase , lowercase = None ) -> Union[str, Any]:
        """No vocabulary files to save — the vocab is all of Unicode."""
        return ()
68
# pytest suite for datasets' file_utils download/caching helpers, including
# offline-mode behaviour.
import os
from pathlib import Path
from unittest.mock import patch

import pytest
import zstandard as zstd

from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
    OfflineModeIsEnabled,
    cached_path,
    fsspec_get,
    fsspec_head,
    ftp_get,
    ftp_head,
    get_from_cache,
    http_get,
    http_head,
)


# Shared fixture payload and filename.
lowerCAmelCase__ = """\
Text data.
Second line of data."""

lowerCAmelCase__ = """file"""


@pytest.fixture(scope="session" )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[Any] ) -> Optional[int]:
    """Session fixture: write the payload zstd-compressed and return its path."""
    A__ = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
    A__ = bytes(SCREAMING_SNAKE_CASE_ , "utf-8" )
    with zstd.open(SCREAMING_SNAKE_CASE_ , "wb" ) as f:
        f.write(SCREAMING_SNAKE_CASE_ )
    return path


@pytest.fixture
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Any ) -> List[str]:
    """Fixture: write the payload into the mock fsspec filesystem."""
    with open(os.path.join(tmpfs.local_root_dir , SCREAMING_SNAKE_CASE_ ) , "w" ) as f:
        f.write(SCREAMING_SNAKE_CASE_ )
    return FILE_PATH


@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] , SCREAMING_SNAKE_CASE_: Any , SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: Any , SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: int ) -> Any:
    """cached_path with extract_compressed_file=True must yield the original text
    for each supported compression format."""
    A__ = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    A__ = input_paths[compression_format]
    A__ = tmp_path / "cache"
    A__ = DownloadConfig(cache_dir=SCREAMING_SNAKE_CASE_ , extract_compressed_file=SCREAMING_SNAKE_CASE_ )
    A__ = cached_path(SCREAMING_SNAKE_CASE_ , download_config=SCREAMING_SNAKE_CASE_ )
    with open(SCREAMING_SNAKE_CASE_ ) as f:
        A__ = f.read()
    with open(SCREAMING_SNAKE_CASE_ ) as f:
        A__ = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] , SCREAMING_SNAKE_CASE_: List[Any] , SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: Tuple , SCREAMING_SNAKE_CASE_: str ) -> Dict:
    """Extracted files must land in the expected (default or custom) directory."""
    A__ = "custom_cache"
    A__ = "custom_extracted_dir"
    A__ = tmp_path / "custom_extracted_path"
    if default_extracted:
        A__ = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , SCREAMING_SNAKE_CASE_ )
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(SCREAMING_SNAKE_CASE_ ) )
        A__ = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    A__ = xz_file
    A__ = (
        DownloadConfig(extract_compressed_file=SCREAMING_SNAKE_CASE_ )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=SCREAMING_SNAKE_CASE_ )
    )
    A__ = cached_path(SCREAMING_SNAKE_CASE_ , download_config=SCREAMING_SNAKE_CASE_ )
    assert Path(SCREAMING_SNAKE_CASE_ ).parent.parts[-2:] == expected


def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] ) -> Optional[int]:
    """A local file path (absolute or relative) is returned unchanged."""
    # absolute path
    A__ = str(Path(SCREAMING_SNAKE_CASE_ ).resolve() )
    assert cached_path(SCREAMING_SNAKE_CASE_ ) == text_file
    # relative path
    A__ = str(Path(SCREAMING_SNAKE_CASE_ ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(SCREAMING_SNAKE_CASE_ ) == text_file


def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Dict ) -> List[str]:
    """Missing local files must raise (absolute and relative forms)."""
    # absolute path
    A__ = str(tmp_path.resolve() / "__missing_file__.txt" )
    with pytest.raises(SCREAMING_SNAKE_CASE_ ):
        cached_path(SCREAMING_SNAKE_CASE_ )
    # relative path
    A__ = "./__missing_file__.txt"
    with pytest.raises(SCREAMING_SNAKE_CASE_ ):
        cached_path(SCREAMING_SNAKE_CASE_ )


def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str ) -> Union[str, Any]:
    """get_from_cache must fetch content through the tmp:// fsspec mock."""
    A__ = get_from_cache(F'tmp://{tmpfs_file}' )
    with open(SCREAMING_SNAKE_CASE_ ) as f:
        A__ = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( ) -> List[Any]:
    """Offline mode: cached_path on a URL must raise."""
    with pytest.raises(SCREAMING_SNAKE_CASE_ ):
        cached_path("https://huggingface.co" )


@patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] ) -> int:
    """Offline mode: HTTP get/head must raise."""
    A__ = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(SCREAMING_SNAKE_CASE_ ):
        http_get("https://huggingface.co" , temp_file=SCREAMING_SNAKE_CASE_ )
    with pytest.raises(SCREAMING_SNAKE_CASE_ ):
        http_head("https://huggingface.co" )


@patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Dict ) -> List[Any]:
    """Offline mode: FTP get/head must raise."""
    A__ = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(SCREAMING_SNAKE_CASE_ ):
        ftp_get("ftp://huggingface.co" , temp_file=SCREAMING_SNAKE_CASE_ )
    with pytest.raises(SCREAMING_SNAKE_CASE_ ):
        ftp_head("ftp://huggingface.co" )


@patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[str, Any] ) -> str:
    """Offline mode: fsspec get/head must raise."""
    A__ = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(SCREAMING_SNAKE_CASE_ ):
        fsspec_get("s3://huggingface.co" , temp_file=SCREAMING_SNAKE_CASE_ )
    with pytest.raises(SCREAMING_SNAKE_CASE_ ):
        fsspec_head("s3://huggingface.co" )
68
1
# Configuration (and ONNX export configuration) for XLM models.
# NOTE(review): attribute assignments are machine-mangled to `A__ = ...`; in
# the unmangled source each right-hand name is stored as `self.<name>`.
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


lowerCAmelCase__ = logging.get_logger(__name__)

# Map of pretrained checkpoints to their hosted config files.
lowerCAmelCase__ = {
    """xlm-mlm-en-2048""": """https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json""",
    """xlm-mlm-ende-1024""": """https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json""",
    """xlm-mlm-enfr-1024""": """https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json""",
    """xlm-mlm-enro-1024""": """https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json""",
    """xlm-mlm-tlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json""",
    """xlm-mlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json""",
    """xlm-clm-enfr-1024""": """https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json""",
    """xlm-clm-ende-1024""": """https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json""",
    """xlm-mlm-17-1280""": """https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json""",
    """xlm-mlm-100-1280""": """https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json""",
}


class a__ ( snake_case ):
    """Configuration storing the hyper-parameters of an XLM model."""

    __lowerCamelCase = 'xlm'
    # Maps canonical transformers attribute names onto XLM's historic names.
    __lowerCamelCase = {
        'hidden_size': 'emb_dim',
        'num_attention_heads': 'n_heads',
        'num_hidden_layers': 'n_layers',
        'n_words': 'vocab_size',  # For backward compatibility
    }

    def __init__( self , lowercase=30145 , lowercase=2048 , lowercase=12 , lowercase=16 , lowercase=0.1 , lowercase=0.1 , lowercase=True , lowercase=False , lowercase=False , lowercase=False , lowercase=1 , lowercase=True , lowercase=512 , lowercase=2048**-0.5 , lowercase=1e-12 , lowercase=0.02 , lowercase=0 , lowercase=1 , lowercase=2 , lowercase=3 , lowercase=5 , lowercase=True , lowercase="first" , lowercase=True , lowercase=None , lowercase=True , lowercase=0.1 , lowercase=5 , lowercase=5 , lowercase=0 , lowercase=0 , lowercase=2 , lowercase=0 , **lowercase , ) -> Optional[int]:
        """Initialise the config; positional defaults (in order) correspond to:
        vocab_size, emb_dim, n_layers, n_heads, dropout, attention_dropout,
        gelu_activation, sinusoidal_embeddings, causal, asm, n_langs,
        use_lang_emb, max_position_embeddings, embed_init_std, layer_norm_eps,
        init_std, bos/eos/pad/unk/mask indices, is_encoder, summary_* options,
        start_n_top, end_n_top, mask_token_id, lang_id, pad/bos token ids."""
        A__ = vocab_size
        A__ = emb_dim
        A__ = n_layers
        A__ = n_heads
        A__ = dropout
        A__ = attention_dropout
        A__ = gelu_activation
        A__ = sinusoidal_embeddings
        A__ = causal
        A__ = asm
        A__ = n_langs
        A__ = use_lang_emb
        A__ = layer_norm_eps
        A__ = bos_index
        A__ = eos_index
        A__ = pad_index
        A__ = unk_index
        A__ = mask_index
        A__ = is_encoder
        A__ = max_position_embeddings
        A__ = embed_init_std
        A__ = init_std
        A__ = summary_type
        A__ = summary_use_proj
        A__ = summary_activation
        A__ = summary_proj_to_labels
        A__ = summary_first_dropout
        A__ = start_n_top
        A__ = end_n_top
        A__ = mask_token_id
        A__ = lang_id

        # Legacy alias: `n_words` used to be the vocab-size kwarg.
        if "n_words" in kwargs:
            A__ = kwargs["n_words"]

        super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , **lowercase )


class a__ ( snake_case ):
    """ONNX export configuration for XLM (declares the dynamic input axes)."""

    @property
    def UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
        """Dynamic axes: multiple-choice tasks add a `choice` axis."""
        if self.task == "multiple-choice":
            A__ = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            A__ = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ] )
68
# Test module for the TensorFlow BlenderbotSmall model: a model tester that
# fabricates configs/inputs, a helper that fills default masks, the common
# model-test class, and a slow integration/generation test.
# NOTE(review): class/method names are machine-mangled (all three classes are
# `a__`, methods share `UpperCamelCase`); names read below (`input_ids`,
# `eos_tensor`, `config`, `inputs_dict`, `model`, `outputs`, ...) are never
# bound under those names in this mangled copy.
from __future__ import annotations

import unittest

from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel


@require_tf
class a__ :
    """Helper that fabricates small configs and inputs for the common tests."""

    __lowerCamelCase = BlenderbotSmallConfig
    __lowerCamelCase = {}
    __lowerCamelCase = 'gelu'

    def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=False , lowercase=99 , lowercase=32 , lowercase=2 , lowercase=4 , lowercase=37 , lowercase=0.1 , lowercase=0.1 , lowercase=20 , lowercase=2 , lowercase=1 , lowercase=0 , ) -> Any:
        """Store the toy hyper-parameters used to build tiny test models."""
        A__ = parent
        A__ = batch_size
        A__ = seq_length
        A__ = is_training
        A__ = use_labels
        A__ = vocab_size
        A__ = hidden_size
        A__ = num_hidden_layers
        A__ = num_attention_heads
        A__ = intermediate_size
        A__ = hidden_dropout_prob
        A__ = attention_probs_dropout_prob
        A__ = max_position_embeddings
        A__ = eos_token_id
        A__ = pad_token_id
        A__ = bos_token_id

    def UpperCamelCase ( self ) -> Tuple:
        """Build a random (config, inputs_dict) pair; every sequence ends in EOS."""
        A__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        A__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        A__ = tf.concat([input_ids, eos_tensor] , axis=1 )

        A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        A__ = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        A__ = prepare_blenderbot_small_inputs_dict(lowercase , lowercase , lowercase )
        return config, inputs_dict

    def UpperCamelCase ( self , lowercase , lowercase ) -> str:
        """Check that decoding with cached past_key_values matches decoding
        the full sequence from scratch (within 1e-3)."""
        A__ = TFBlenderbotSmallModel(config=lowercase ).get_decoder()
        A__ = inputs_dict["input_ids"]

        A__ = input_ids[:1, :]
        A__ = inputs_dict["attention_mask"][:1, :]
        A__ = inputs_dict["head_mask"]
        A__ = 1

        # first forward pass
        A__ = model(lowercase , attention_mask=lowercase , head_mask=lowercase , use_cache=lowercase )

        A__ , A__ = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        A__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
        A__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )

        # append to next input_ids and
        A__ = tf.concat([input_ids, next_tokens] , axis=-1 )
        A__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )

        A__ = model(lowercase , attention_mask=lowercase )[0]
        A__ = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0]

        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )

        # select random slice
        A__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        A__ = output_from_no_past[:, -3:, random_slice_idx]
        A__ = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(lowercase , lowercase , rtol=1e-3 )


def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int , SCREAMING_SNAKE_CASE_: List[Any] , SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: Optional[Any]=None , SCREAMING_SNAKE_CASE_: Optional[int]=None , SCREAMING_SNAKE_CASE_: Optional[int]=None , SCREAMING_SNAKE_CASE_: Dict=None , SCREAMING_SNAKE_CASE_: List[str]=None , ) -> List[Any]:
    """Fill in default attention/head masks for any that were not supplied and
    return the full model-inputs dict."""
    if attention_mask is None:
        # Attend everywhere except padding.
        A__ = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE_ , config.pad_token_id ) , tf.inta )
    if decoder_attention_mask is None:
        # Always attend to the first decoder token (decoder start token).
        A__ = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
            ] , axis=-1 , )
    if head_mask is None:
        A__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        A__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        A__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class a__ ( snake_case , snake_case , unittest.TestCase ):
    """Common model tests for the TF BlenderbotSmall variants."""

    __lowerCamelCase = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    __lowerCamelCase = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    __lowerCamelCase = (
        {
            'conversational': TFBlenderbotSmallForConditionalGeneration,
            'feature-extraction': TFBlenderbotSmallModel,
            'summarization': TFBlenderbotSmallForConditionalGeneration,
            'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
            'translation': TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    __lowerCamelCase = True
    __lowerCamelCase = False
    __lowerCamelCase = False

    def UpperCamelCase ( self ) -> Tuple:
        """Set up the model tester and the config tester."""
        A__ = TFBlenderbotSmallModelTester(self )
        A__ = ConfigTester(self , config_class=lowercase )

    def UpperCamelCase ( self ) -> Tuple:
        """Run the shared config sanity tests."""
        self.config_tester.run_common_tests()

    def UpperCamelCase ( self ) -> Tuple:
        """Exercise the cached-decoding equivalence check."""
        A__ = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*lowercase )


@require_tokenizers
@require_tf
class a__ ( unittest.TestCase ):
    """Slow integration test: generate a reply with the 90M checkpoint."""

    __lowerCamelCase = [
        'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
        ' i\'m going to throw up.\nand why is that?'
    ]
    __lowerCamelCase = 'facebook/blenderbot_small-90M'

    @cached_property
    def UpperCamelCase ( self ) -> Tuple:
        """Tokenizer for the checkpoint.

        NOTE(review): loads from "facebook/blenderbot-90M" rather than
        self.model_name — presumably intentional (legacy repo name); confirm.
        """
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )

    @cached_property
    def UpperCamelCase ( self ) -> Any:
        """Seq2seq model loaded from the checkpoint."""
        A__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    @slow
    def UpperCamelCase ( self ) -> int:
        """Beam-search generation must yield one of the known-good replies."""
        A__ = self.tokenizer(self.src_text , return_tensors="tf" )
        A__ = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowercase , )
        A__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowercase )[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
1
import os import re from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { """vocab_file""": """vocab.txt""", """merges_file""": """bpe.codes""", } lowerCAmelCase__ = { """vocab_file""": { """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""", """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""", }, """merges_file""": { """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""", """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""", }, } lowerCAmelCase__ = { """vinai/phobert-base""": 2_5_6, """vinai/phobert-large""": 2_5_6, } def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] ) -> str: '''simple docstring''' A__ = set() A__ = word[0] for char in word[1:]: pairs.add((prev_char, char) ) A__ = char A__ = set(SCREAMING_SNAKE_CASE_ ) return pairs class a__ ( snake_case ): """simple docstring""" __lowerCamelCase = VOCAB_FILES_NAMES __lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , lowercase , lowercase , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , **lowercase , ) -> Optional[Any]: '''simple docstring''' super().__init__( bos_token=lowercase , eos_token=lowercase , unk_token=lowercase , sep_token=lowercase , cls_token=lowercase , pad_token=lowercase , mask_token=lowercase , **lowercase , ) A__ = vocab_file A__ = merges_file A__ = {} A__ = 0 A__ = 1 A__ = 2 A__ = 3 self.add_from_file(lowercase ) A__ = {v: k for k, v in self.encoder.items()} with open(lowercase , encoding="utf-8" ) as merges_handle: A__ = merges_handle.read().split("\n" )[:-1] A__ = [tuple(merge.split()[:-1] 
) for merge in merges] A__ = dict(zip(lowercase , range(len(lowercase ) ) ) ) A__ = {} def UpperCamelCase ( self , lowercase , lowercase = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A__ = [self.cls_token_id] A__ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def UpperCamelCase ( self , lowercase , lowercase = None , lowercase = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowercase , token_ids_a=lowercase , already_has_special_tokens=lowercase ) if token_ids_a is None: return [1] + ([0] * len(lowercase )) + [1] return [1] + ([0] * len(lowercase )) + [1, 1] + ([0] * len(lowercase )) + [1] def UpperCamelCase ( self , lowercase , lowercase = None ) -> List[int]: '''simple docstring''' A__ = [self.sep_token_id] A__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def UpperCamelCase ( self ) -> int: '''simple docstring''' return len(self.encoder ) def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def UpperCamelCase ( self , lowercase ) -> Dict: '''simple docstring''' if token in self.cache: return self.cache[token] A__ = tuple(lowercase ) A__ = tuple(list(word[:-1] ) + [word[-1] + "</w>"] ) A__ = get_pairs(lowercase ) if not pairs: return token while True: A__ = min(lowercase , key=lambda lowercase : self.bpe_ranks.get(lowercase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break A__ , A__ = bigram A__ = [] A__ = 0 while i < len(lowercase ): try: A__ = word.index(lowercase , lowercase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) A__ = j if word[i] == first and i < len(lowercase ) - 1 and word[i + 1] == second: new_word.append(first + 
second ) i += 2 else: new_word.append(word[i] ) i += 1 A__ = tuple(lowercase ) A__ = new_word if len(lowercase ) == 1: break else: A__ = get_pairs(lowercase ) A__ = "@@ ".join(lowercase ) A__ = word[:-4] A__ = word return word def UpperCamelCase ( self , lowercase ) -> Optional[Any]: '''simple docstring''' A__ = [] A__ = re.findall(R"\S+\n?" , lowercase ) for token in words: split_tokens.extend(list(self.bpe(lowercase ).split(" " ) ) ) return split_tokens def UpperCamelCase ( self , lowercase ) -> Tuple: '''simple docstring''' return self.encoder.get(lowercase , self.encoder.get(self.unk_token ) ) def UpperCamelCase ( self , lowercase ) -> Union[str, Any]: '''simple docstring''' return self.decoder.get(lowercase , self.unk_token ) def UpperCamelCase ( self , lowercase ) -> Optional[int]: '''simple docstring''' A__ = " ".join(lowercase ).replace("@@ " , "" ).strip() return out_string def UpperCamelCase ( self , lowercase , lowercase = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(lowercase ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return A__ = os.path.join( lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) A__ = os.path.join( lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ): copyfile(self.vocab_file , lowercase ) if os.path.abspath(self.merges_file ) != os.path.abspath(lowercase ): copyfile(self.merges_file , lowercase ) return out_vocab_file, out_merge_file def UpperCamelCase ( self , lowercase ) -> str: '''simple docstring''' if isinstance(lowercase , lowercase ): try: with open(lowercase , "r" , encoding="utf-8" ) as fd: self.add_from_file(lowercase ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception(F'Incorrect encoding detected in {f}, please rebuild the dataset' ) return A__ = f.readlines() for lineTmp in 
lines: A__ = lineTmp.strip() A__ = line.rfind(" " ) if idx == -1: raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" ) A__ = line[:idx] A__ = len(self.encoder )
68
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCAmelCase__ = logging.get_logger(__name__) class a__ ( snake_case ): """simple docstring""" __lowerCamelCase = ['pixel_values'] def __init__( self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = None , lowercase = True , **lowercase , ) -> None: '''simple docstring''' super().__init__(**lowercase ) A__ = size if size is not None else {"height": 384, "width": 384} A__ = get_size_dict(lowercase , default_to_square=lowercase ) A__ = do_resize A__ = size A__ = resample A__ = do_rescale A__ = rescale_factor A__ = do_normalize A__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN A__ = image_std if image_std is not None else OPENAI_CLIP_STD A__ = do_convert_rgb def UpperCamelCase ( self , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ) -> np.ndarray: '''simple docstring''' A__ = get_size_dict(lowercase , default_to_square=lowercase ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}' ) A__ = (size["height"], size["width"]) return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase ) def UpperCamelCase ( self , lowercase , lowercase , lowercase = None , **lowercase , ) -> Optional[Any]: '''simple docstring''' return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase ) def UpperCamelCase ( self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ) -> np.ndarray: '''simple docstring''' return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase ) def UpperCamelCase ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ) -> PIL.Image.Image: '''simple docstring''' A__ = do_resize if do_resize is not None else self.do_resize A__ = resample if resample is not None else self.resample A__ = do_rescale if do_rescale is not None else self.do_rescale A__ = rescale_factor if rescale_factor is not None else self.rescale_factor A__ = do_normalize if do_normalize is not None else self.do_normalize A__ = image_mean if image_mean is not None else self.image_mean A__ = image_std if image_std is not None else self.image_std A__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb A__ = size if size is not None else self.size A__ = get_size_dict(lowercase , default_to_square=lowercase ) A__ = make_list_of_images(lowercase ) if not valid_images(lowercase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." 
) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # PIL RGBA images are converted to RGB if do_convert_rgb: A__ = [convert_to_rgb(lowercase ) for image in images] # All transformations expect numpy arrays. A__ = [to_numpy_array(lowercase ) for image in images] if do_resize: A__ = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images] if do_rescale: A__ = [self.rescale(image=lowercase , scale=lowercase ) for image in images] if do_normalize: A__ = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images] A__ = [to_channel_dimension_format(lowercase , lowercase ) for image in images] A__ = BatchFeature(data={"pixel_values": images} , tensor_type=lowercase ) return encoded_outputs
68
1
from PIL import Image def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Image ) -> Image: '''simple docstring''' A__ , A__ = image.size A__ = 0 A__ = image.load() for i in range(SCREAMING_SNAKE_CASE_ ): for j in range(SCREAMING_SNAKE_CASE_ ): A__ = pixels[j, i] mean += pixel mean //= width * height for j in range(SCREAMING_SNAKE_CASE_ ): for i in range(SCREAMING_SNAKE_CASE_ ): A__ = 2_5_5 if pixels[i, j] > mean else 0 return image if __name__ == "__main__": lowerCAmelCase__ = mean_threshold(Image.open("""path_to_image""").convert("""L""")) image.save("""output_image_path""")
68
import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) lowerCAmelCase__ = """hf-internal-testing/tiny-random-bert""" lowerCAmelCase__ = os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""") lowerCAmelCase__ = """9b8c223d42b2188cb49d29af482996f9d0f3e5a6""" class a__ ( unittest.TestCase ): """simple docstring""" def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' A__ = cached_file(lowercase , lowercase ) # Should have downloaded the file in here self.assertTrue(os.path.isdir(lowercase ) ) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(lowercase , lowercase ) ) ) with open(os.path.join(lowercase , "refs" , "main" ) ) as f: A__ = f.read() self.assertEqual(lowercase , os.path.join(lowercase , "snapshots" , lowercase , lowercase ) ) self.assertTrue(os.path.isfile(lowercase ) ) # File is cached at the same place the second time. A__ = cached_file(lowercase , lowercase ) self.assertEqual(lowercase , lowercase ) # Using a specific revision to test the full commit hash. 
A__ = cached_file(lowercase , lowercase , revision="9b8c223" ) self.assertEqual(lowercase , os.path.join(lowercase , "snapshots" , lowercase , lowercase ) ) def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' with self.assertRaisesRegex(lowercase , "is not a valid model identifier" ): A__ = cached_file("tiny-random-bert" , lowercase ) with self.assertRaisesRegex(lowercase , "is not a valid git identifier" ): A__ = cached_file(lowercase , lowercase , revision="aaaa" ) with self.assertRaisesRegex(lowercase , "does not appear to have a file named" ): A__ = cached_file(lowercase , "conf" ) def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' with self.assertRaisesRegex(lowercase , "does not appear to have a file named" ): A__ = cached_file(lowercase , "conf" ) with open(os.path.join(lowercase , "refs" , "main" ) ) as f: A__ = f.read() self.assertTrue(os.path.isfile(os.path.join(lowercase , ".no_exist" , lowercase , "conf" ) ) ) A__ = cached_file(lowercase , "conf" , _raise_exceptions_for_missing_entries=lowercase ) self.assertIsNone(lowercase ) A__ = cached_file(lowercase , "conf" , local_files_only=lowercase , _raise_exceptions_for_missing_entries=lowercase ) self.assertIsNone(lowercase ) A__ = mock.Mock() A__ = 500 A__ = {} A__ = HTTPError A__ = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. 
with mock.patch("requests.Session.request" , return_value=lowercase ) as mock_head: A__ = cached_file(lowercase , "conf" , _raise_exceptions_for_connection_errors=lowercase ) self.assertIsNone(lowercase ) # This check we did call the fake head request mock_head.assert_called() def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only" , lowercase ) ) self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , lowercase ) ) self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , lowercase ) ) def UpperCamelCase ( self ) -> str: '''simple docstring''' self.assertIsNone(get_file_from_repo("bert-base-cased" , "ahah.txt" ) ) # The function raises if the repository does not exist. with self.assertRaisesRegex(lowercase , "is not a valid model identifier" ): get_file_from_repo("bert-base-case" , lowercase ) # The function raises if the revision does not exist. with self.assertRaisesRegex(lowercase , "is not a valid git identifier" ): get_file_from_repo("bert-base-cased" , lowercase , revision="ahaha" ) A__ = get_file_from_repo("bert-base-cased" , lowercase ) # The name is the cached name which is not very easy to test, so instead we load the content. A__ = json.loads(open(lowercase , "r" ).read() ) self.assertEqual(config["hidden_size"] , 768 ) def UpperCamelCase ( self ) -> Dict: '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: A__ = Path(lowercase ) / "a.txt" filename.touch() self.assertEqual(get_file_from_repo(lowercase , "a.txt" ) , str(lowercase ) ) self.assertIsNone(get_file_from_repo(lowercase , "b.txt" ) )
68
1
import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: List[str]=1 ) -> str: '''simple docstring''' if n_shave_prefix_segments >= 0: return ".".join(path.split("." )[n_shave_prefix_segments:] ) else: return ".".join(path.split("." )[:n_shave_prefix_segments] ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[str, Any] , SCREAMING_SNAKE_CASE_: Any=0 ) -> Dict: '''simple docstring''' A__ = [] for old_item in old_list: A__ = old_item.replace("in_layers.0" , "norm1" ) A__ = new_item.replace("in_layers.2" , "conv1" ) A__ = new_item.replace("out_layers.0" , "norm2" ) A__ = new_item.replace("out_layers.3" , "conv2" ) A__ = new_item.replace("emb_layers.1" , "time_emb_proj" ) A__ = new_item.replace("skip_connection" , "conv_shortcut" ) A__ = shave_segments(SCREAMING_SNAKE_CASE_ , n_shave_prefix_segments=SCREAMING_SNAKE_CASE_ ) mapping.append({"old": old_item, "new": new_item} ) return mapping def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[str, Any] , SCREAMING_SNAKE_CASE_: Tuple=0 ) -> Dict: '''simple docstring''' A__ = [] for old_item in old_list: A__ = old_item A__ = new_item.replace("norm.weight" , "group_norm.weight" ) A__ = new_item.replace("norm.bias" , "group_norm.bias" ) A__ = new_item.replace("proj_out.weight" , "proj_attn.weight" ) A__ = new_item.replace("proj_out.bias" , "proj_attn.bias" ) A__ = shave_segments(SCREAMING_SNAKE_CASE_ , n_shave_prefix_segments=SCREAMING_SNAKE_CASE_ ) mapping.append({"old": old_item, "new": new_item} ) return mapping def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[str, Any] , SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: int=None , SCREAMING_SNAKE_CASE_: Tuple=None , SCREAMING_SNAKE_CASE_: List[str]=None ) -> int: '''simple docstring''' assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ), "Paths should be a list of 
dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): A__ = old_checkpoint[path] A__ = old_tensor.shape[0] // 3 A__ = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) A__ = old_tensor.shape[0] // config["num_head_channels"] // 3 A__ = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) A__ , A__ , A__ = old_tensor.split(channels // num_heads , dim=1 ) A__ = query.reshape(SCREAMING_SNAKE_CASE_ ) A__ = key.reshape(SCREAMING_SNAKE_CASE_ ) A__ = value.reshape(SCREAMING_SNAKE_CASE_ ) for path in paths: A__ = path["new"] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here A__ = new_path.replace("middle_block.0" , "mid_block.resnets.0" ) A__ = new_path.replace("middle_block.1" , "mid_block.attentions.0" ) A__ = new_path.replace("middle_block.2" , "mid_block.resnets.1" ) if additional_replacements is not None: for replacement in additional_replacements: A__ = new_path.replace(replacement["old"] , replacement["new"] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: A__ = old_checkpoint[path["old"]][:, :, 0] else: A__ = old_checkpoint[path["old"]] def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Tuple , SCREAMING_SNAKE_CASE_: str ) -> Union[str, Any]: '''simple docstring''' A__ = {} A__ = checkpoint["time_embed.0.weight"] A__ = checkpoint["time_embed.0.bias"] A__ = checkpoint["time_embed.2.weight"] A__ = checkpoint["time_embed.2.bias"] A__ = checkpoint["input_blocks.0.0.weight"] A__ = checkpoint["input_blocks.0.0.bias"] A__ = checkpoint["out.0.weight"] A__ = checkpoint["out.0.bias"] A__ = checkpoint["out.2.weight"] A__ = checkpoint["out.2.bias"] # Retrieves the keys for the input blocks only A__ = len({".".join(layer.split("." 
)[:2] ) for layer in checkpoint if "input_blocks" in layer} ) A__ = { layer_id: [key for key in checkpoint if F'input_blocks.{layer_id}' in key] for layer_id in range(SCREAMING_SNAKE_CASE_ ) } # Retrieves the keys for the middle blocks only A__ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "middle_block" in layer} ) A__ = { layer_id: [key for key in checkpoint if F'middle_block.{layer_id}' in key] for layer_id in range(SCREAMING_SNAKE_CASE_ ) } # Retrieves the keys for the output blocks only A__ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "output_blocks" in layer} ) A__ = { layer_id: [key for key in checkpoint if F'output_blocks.{layer_id}' in key] for layer_id in range(SCREAMING_SNAKE_CASE_ ) } for i in range(1 , SCREAMING_SNAKE_CASE_ ): A__ = (i - 1) // (config["num_res_blocks"] + 1) A__ = (i - 1) % (config["num_res_blocks"] + 1) A__ = [key for key in input_blocks[i] if F'input_blocks.{i}.0' in key] A__ = [key for key in input_blocks[i] if F'input_blocks.{i}.1' in key] if F'input_blocks.{i}.0.op.weight' in checkpoint: A__ = checkpoint[ F'input_blocks.{i}.0.op.weight' ] A__ = checkpoint[ F'input_blocks.{i}.0.op.bias' ] continue A__ = renew_resnet_paths(SCREAMING_SNAKE_CASE_ ) A__ = {"old": F'input_blocks.{i}.0', "new": F'down_blocks.{block_id}.resnets.{layer_in_block_id}'} A__ = {"old": "resnets.2.op", "new": "downsamplers.0.op"} assign_to_checkpoint( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , additional_replacements=[meta_path, resnet_op] , config=SCREAMING_SNAKE_CASE_ ) if len(SCREAMING_SNAKE_CASE_ ): A__ = renew_attention_paths(SCREAMING_SNAKE_CASE_ ) A__ = { "old": F'input_blocks.{i}.1', "new": F'down_blocks.{block_id}.attentions.{layer_in_block_id}', } A__ = { F'input_blocks.{i}.1.qkv.bias': { "key": F'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias', "query": F'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias', "value": 
F'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias', }, F'input_blocks.{i}.1.qkv.weight': { "key": F'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight', "query": F'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight', "value": F'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight', }, } assign_to_checkpoint( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , additional_replacements=[meta_path] , attention_paths_to_split=SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ , ) A__ = middle_blocks[0] A__ = middle_blocks[1] A__ = middle_blocks[2] A__ = renew_resnet_paths(SCREAMING_SNAKE_CASE_ ) assign_to_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ ) A__ = renew_resnet_paths(SCREAMING_SNAKE_CASE_ ) assign_to_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ ) A__ = renew_attention_paths(SCREAMING_SNAKE_CASE_ ) A__ = { "middle_block.1.qkv.bias": { "key": "mid_block.attentions.0.key.bias", "query": "mid_block.attentions.0.query.bias", "value": "mid_block.attentions.0.value.bias", }, "middle_block.1.qkv.weight": { "key": "mid_block.attentions.0.key.weight", "query": "mid_block.attentions.0.query.weight", "value": "mid_block.attentions.0.value.weight", }, } assign_to_checkpoint( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , attention_paths_to_split=SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ ) for i in range(SCREAMING_SNAKE_CASE_ ): A__ = i // (config["num_res_blocks"] + 1) A__ = i % (config["num_res_blocks"] + 1) A__ = [shave_segments(SCREAMING_SNAKE_CASE_ , 2 ) for name in output_blocks[i]] A__ = {} for layer in output_block_layers: A__ , A__ = layer.split("." 
)[0], shave_segments(SCREAMING_SNAKE_CASE_ , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(SCREAMING_SNAKE_CASE_ ) else: A__ = [layer_name] if len(SCREAMING_SNAKE_CASE_ ) > 1: A__ = [key for key in output_blocks[i] if F'output_blocks.{i}.0' in key] A__ = [key for key in output_blocks[i] if F'output_blocks.{i}.1' in key] A__ = renew_resnet_paths(SCREAMING_SNAKE_CASE_ ) A__ = renew_resnet_paths(SCREAMING_SNAKE_CASE_ ) A__ = {"old": F'output_blocks.{i}.0', "new": F'up_blocks.{block_id}.resnets.{layer_in_block_id}'} assign_to_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , additional_replacements=[meta_path] , config=SCREAMING_SNAKE_CASE_ ) if ["conv.weight", "conv.bias"] in output_block_list.values(): A__ = list(output_block_list.values() ).index(["conv.weight", "conv.bias"] ) A__ = checkpoint[ F'output_blocks.{i}.{index}.conv.weight' ] A__ = checkpoint[ F'output_blocks.{i}.{index}.conv.bias' ] # Clear attentions as they have been attributed above. 
if len(SCREAMING_SNAKE_CASE_ ) == 2: A__ = [] if len(SCREAMING_SNAKE_CASE_ ): A__ = renew_attention_paths(SCREAMING_SNAKE_CASE_ ) A__ = { "old": F'output_blocks.{i}.1', "new": F'up_blocks.{block_id}.attentions.{layer_in_block_id}', } A__ = { F'output_blocks.{i}.1.qkv.bias': { "key": F'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias', "query": F'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias', "value": F'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias', }, F'output_blocks.{i}.1.qkv.weight': { "key": F'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight', "query": F'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight', "value": F'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight', }, } assign_to_checkpoint( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("qkv" in key for key in attentions ) else None , config=SCREAMING_SNAKE_CASE_ , ) else: A__ = renew_resnet_paths(SCREAMING_SNAKE_CASE_ , n_shave_prefix_segments=1 ) for path in resnet_0_paths: A__ = ".".join(["output_blocks", str(SCREAMING_SNAKE_CASE_ ), path["old"]] ) A__ = ".".join(["up_blocks", str(SCREAMING_SNAKE_CASE_ ), "resnets", str(SCREAMING_SNAKE_CASE_ ), path["new"]] ) A__ = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument( """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The config json file corresponding to the architecture.""", ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") lowerCAmelCase__ = parser.parse_args() lowerCAmelCase__ = torch.load(args.checkpoint_path) with open(args.config_file) as f: lowerCAmelCase__ = 
json.loads(f.read()) lowerCAmelCase__ = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] lowerCAmelCase__ = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: lowerCAmelCase__ = DDPMScheduler.from_config("""/""".join(args.checkpoint_path.split("""/""")[:-1])) lowerCAmelCase__ = VQModel.from_pretrained("""/""".join(args.checkpoint_path.split("""/""")[:-1])) lowerCAmelCase__ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
68
import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class a__ ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" __lowerCamelCase = AutoencoderKL __lowerCamelCase = 'sample' __lowerCamelCase = 1e-2 @property def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' A__ = 4 A__ = 3 A__ = (32, 32) A__ = floats_tensor((batch_size, num_channels) + sizes ).to(lowercase ) return {"sample": image} @property def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' return (3, 32, 32) @property def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' return (3, 32, 32) def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' A__ = { "block_out_channels": [32, 64], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 4, } A__ = self.dummy_input return init_dict, inputs_dict def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' pass def UpperCamelCase ( self ) -> Any: '''simple docstring''' pass @unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" ) def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ , A__ = self.prepare_init_args_and_inputs_for_common() A__ = self.model_class(**lowercase ) model.to(lowercase ) assert not model.is_gradient_checkpointing and model.training A__ = model(**lowercase ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() A__ = torch.randn_like(lowercase ) A__ = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing A__ = self.model_class(**lowercase ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(lowercase ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training A__ = model_a(**lowercase ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() A__ = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1e-5 ) A__ = dict(model.named_parameters() ) A__ = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) ) def UpperCamelCase ( self ) -> Optional[int]: '''simple docstring''' A__ , A__ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=lowercase ) self.assertIsNotNone(lowercase ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(lowercase ) A__ = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def UpperCamelCase ( self ) -> Any: '''simple docstring''' A__ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" ) A__ = model.to(lowercase ) model.eval() if torch_device == "mps": A__ = torch.manual_seed(0 ) else: A__ = torch.Generator(device=lowercase ).manual_seed(0 ) A__ = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) A__ = image.to(lowercase ) with torch.no_grad(): A__ = model(lowercase , sample_posterior=lowercase , generator=lowercase ).sample A__ = output[0, -1, 
-3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. if torch_device == "mps": A__ = torch.tensor( [ -4.00_78e-01, -3.83_23e-04, -1.26_81e-01, -1.14_62e-01, 2.00_95e-01, 1.08_93e-01, -8.82_47e-02, -3.03_61e-01, -9.86_44e-03, ] ) elif torch_device == "cpu": A__ = torch.tensor( [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] ) else: A__ = torch.tensor( [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] ) self.assertTrue(torch_all_close(lowercase , lowercase , rtol=1e-2 ) ) @slow class a__ ( unittest.TestCase ): """simple docstring""" def UpperCamelCase ( self , lowercase , lowercase ) -> str: '''simple docstring''' return F'gaussian_noise_s={seed}_shape={"_".join([str(lowercase ) for s in shape] )}.npy' def UpperCamelCase ( self ) -> Optional[int]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase ( self , lowercase=0 , lowercase=(4, 3, 512, 512) , lowercase=False ) -> Optional[int]: '''simple docstring''' A__ = torch.floataa if fpaa else torch.floataa A__ = torch.from_numpy(load_hf_numpy(self.get_file_format(lowercase , lowercase ) ) ).to(lowercase ).to(lowercase ) return image def UpperCamelCase ( self , lowercase="CompVis/stable-diffusion-v1-4" , lowercase=False ) -> Any: '''simple docstring''' A__ = "fp16" if fpaa else None A__ = torch.floataa if fpaa else torch.floataa A__ = AutoencoderKL.from_pretrained( lowercase , subfolder="vae" , torch_dtype=lowercase , revision=lowercase , ) model.to(lowercase ).eval() return model def UpperCamelCase ( self , lowercase=0 ) -> List[str]: '''simple docstring''' if torch_device == "mps": return torch.manual_seed(lowercase ) return torch.Generator(device=lowercase ).manual_seed(lowercase ) @parameterized.expand( [ # fmt: off [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], 
[-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def UpperCamelCase ( self , lowercase , lowercase , lowercase ) -> int: '''simple docstring''' A__ = self.get_sd_vae_model() A__ = self.get_sd_image(lowercase ) A__ = self.get_generator(lowercase ) with torch.no_grad(): A__ = model(lowercase , generator=lowercase , sample_posterior=lowercase ).sample assert sample.shape == image.shape A__ = sample[-1, -2:, -2:, :2].flatten().float().cpu() A__ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(lowercase , lowercase , atol=3e-3 ) @parameterized.expand( [ # fmt: off [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]], [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]], # fmt: on ] ) @require_torch_gpu def UpperCamelCase ( self , lowercase , lowercase ) -> List[Any]: '''simple docstring''' A__ = self.get_sd_vae_model(fpaa=lowercase ) A__ = self.get_sd_image(lowercase , fpaa=lowercase ) A__ = self.get_generator(lowercase ) with torch.no_grad(): A__ = model(lowercase , generator=lowercase , sample_posterior=lowercase ).sample assert sample.shape == image.shape A__ = sample[-1, -2:, :2, -2:].flatten().float().cpu() A__ = torch.tensor(lowercase ) assert torch_all_close(lowercase , lowercase , atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def UpperCamelCase ( self , lowercase , lowercase , lowercase ) -> Dict: '''simple docstring''' A__ = self.get_sd_vae_model() A__ = 
self.get_sd_image(lowercase ) with torch.no_grad(): A__ = model(lowercase ).sample assert sample.shape == image.shape A__ = sample[-1, -2:, -2:, :2].flatten().float().cpu() A__ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(lowercase , lowercase , atol=3e-3 ) @parameterized.expand( [ # fmt: off [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]], [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]], # fmt: on ] ) @require_torch_gpu def UpperCamelCase ( self , lowercase , lowercase ) -> Tuple: '''simple docstring''' A__ = self.get_sd_vae_model() A__ = self.get_sd_image(lowercase , shape=(3, 4, 64, 64) ) with torch.no_grad(): A__ = model.decode(lowercase ).sample assert list(sample.shape ) == [3, 3, 512, 512] A__ = sample[-1, -2:, :2, -2:].flatten().cpu() A__ = torch.tensor(lowercase ) assert torch_all_close(lowercase , lowercase , atol=1e-3 ) @parameterized.expand( [ # fmt: off [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]], [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]], # fmt: on ] ) @require_torch_gpu def UpperCamelCase ( self , lowercase , lowercase ) -> Union[str, Any]: '''simple docstring''' A__ = self.get_sd_vae_model(fpaa=lowercase ) A__ = self.get_sd_image(lowercase , shape=(3, 4, 64, 64) , fpaa=lowercase ) with torch.no_grad(): A__ = model.decode(lowercase ).sample assert list(sample.shape ) == [3, 3, 512, 512] A__ = sample[-1, -2:, :2, -2:].flatten().float().cpu() A__ = torch.tensor(lowercase ) assert torch_all_close(lowercase , lowercase , atol=5e-3 ) @parameterized.expand([(13,), (16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." 
) def UpperCamelCase ( self , lowercase ) -> Optional[Any]: '''simple docstring''' A__ = self.get_sd_vae_model(fpaa=lowercase ) A__ = self.get_sd_image(lowercase , shape=(3, 4, 64, 64) , fpaa=lowercase ) with torch.no_grad(): A__ = model.decode(lowercase ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): A__ = model.decode(lowercase ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(lowercase , lowercase , atol=1e-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def UpperCamelCase ( self , lowercase ) -> List[str]: '''simple docstring''' A__ = self.get_sd_vae_model() A__ = self.get_sd_image(lowercase , shape=(3, 4, 64, 64) ) with torch.no_grad(): A__ = model.decode(lowercase ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): A__ = model.decode(lowercase ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(lowercase , lowercase , atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]], [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]], # fmt: on ] ) def UpperCamelCase ( self , lowercase , lowercase ) -> str: '''simple docstring''' A__ = self.get_sd_vae_model() A__ = self.get_sd_image(lowercase ) A__ = self.get_generator(lowercase ) with torch.no_grad(): A__ = model.encode(lowercase ).latent_dist A__ = dist.sample(generator=lowercase ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] A__ = sample[0, -1, -3:, -3:].flatten().cpu() A__ = torch.tensor(lowercase ) A__ = 3e-3 if torch_device != "mps" else 1e-2 assert torch_all_close(lowercase , lowercase , atol=lowercase )
68
1
from typing import List, Optional, Tuple, Union

import torch

from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class a__(DiffusionPipeline):
    """Unconditional image-generation pipeline using DDIM sampling.

    NOTE(review): the original had an undefined base class (`snake_case`) and
    duplicate parameter names that could not parse; names below were
    reconstructed from the body's own references (``scheduler.config``,
    ``unet=...``) — confirm against the upstream diffusers ``DDIMPipeline``.
    """

    def __init__(self, unet, scheduler) -> None:
        """Store the UNet and a DDIM-converted scheduler on the pipeline."""
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator=None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """Run the full DDIM denoising loop and return generated images.

        Returns an ``ImagePipelineOutput`` (or a 1-tuple when
        ``return_dict`` is False).
        """
        # sample_size may be an int (square image) or an explicit (h, w) pair.
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        # start from pure Gaussian noise
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        # map from [-1, 1] to [0, 1] and move to channels-last numpy
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
68
import logging
import os
from typing import List, TextIO, Union

from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask


logger = logging.getLogger(__name__)


class a__(TokenClassificationTask):
    """CoNLL-style NER task: one ``token label`` pair per line.

    NOTE(review): base class and method names were reconstructed — the
    original inherited an undefined name and all methods shared one mangled
    name, which shadowed each other.  The restored names match the
    ``TokenClassificationTask`` interface imported above.
    """

    def __init__(self, label_idx=-1) -> None:
        # Column of the CoNLL line that carries the label (default: last).
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode) -> List[InputExample]:
        """Parse ``{data_dir}/{mode}.txt`` into InputExamples, one per sentence."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                # blank lines / -DOCSTART- markers terminate a sentence
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List) -> None:
        """Echo the test file, replacing labels with predictions."""
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        """Read labels from *path*, or fall back to the CoNLL-2003 NER set."""
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            # the "outside" label must always be present
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]


class a__(a__):  # inherits the NER task above (name `a__` still bound to it here)
    """Chunking task: the label lives in the second-to-last CoNLL column."""

    def __init__(self) -> None:
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        """Read labels from *path*, or fall back to the CoNLL-2000 chunk set."""
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]


class a__(TokenClassificationTask):
    """POS-tagging task over CoNLL-U files parsed with ``conllu.parse_incr``."""

    def read_examples_from_file(self, data_dir, mode) -> List[InputExample]:
        """Parse ``{data_dir}/{mode}.txt`` (CoNLL-U) into InputExamples."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List) -> None:
        """Write ``form (upos|prediction)`` triples, one sentence per line."""
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        """Read labels from *path*, or fall back to the Universal POS tag set."""
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
68
1
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    # FileLock guards against concurrent processes racing on the download.
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def lowerCAmelCase__(SCREAMING_SNAKE_CASE_: str) -> str:
    """Split *text* into one sentence per line using NLTK's punkt tokenizer.

    Bug fix: ``re.sub`` returns a new string — the original discarded the
    result, so the pegasus ``<n>`` newline markers were never removed.
    """
    SCREAMING_SNAKE_CASE_ = re.sub("<n>", "", SCREAMING_SNAKE_CASE_)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(SCREAMING_SNAKE_CASE_))
68
import random


class Onepad:
    """Toy one-time-pad-style cipher.

    Each character ``c`` is encrypted with a fresh random key ``k`` as
    ``(ord(c) + k) * k``; decryption inverts it via
    ``(cipher - k**2) // k == ord(c)``.

    Fix: the original defined both methods under one (shadowing) name while
    the ``__main__`` block called ``Onepad().encrypt`` / ``.decrypt`` — the
    class and method names are restored to match those call sites.
    """

    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Return ``(cipher, key)`` lists for *text*."""
        plain = [ord(ch) for ch in text]
        cipher = []
        key = []
        for code in plain:
            k = random.randint(1, 300)
            cipher.append((code + k) * k)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Recover the plaintext from ``(cipher, key)``."""
        plain = []
        for i in range(len(cipher)):
            # integer arithmetic: (code + k) * k - k**2 == code * k, exactly
            code = (cipher[i] - key[i] ** 2) // key[i]
            plain.append(chr(code))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
68
1
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """Return the sum of an arithmetic series.

    Uses S = n/2 * (2a + (n - 1) * d).  The name is restored to
    ``sum_of_series`` because the demo function below calls it by that name
    (the mangled original raised NameError and could not even be defined,
    since all three parameters shared one name).

    >>> sum_of_series(1, 1, 10)
    55.0
    """
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def lowerCAmelCase__() -> None:
    """Demo entry point: print the sum of 1 + 2 + ... + 10."""
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
68
def triangle_number_generator():
    """Yield triangle numbers n * (n + 1) / 2 for n = 1 .. 999_999."""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Return the number of divisors of *n* via trial factorization.

    Multiplies together (multiplicity + 1) over each prime factor.
    """
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:  # remaining prime factor with multiplicity 1
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    """Project Euler 12: first triangle number with more than 500 divisors.

    Names restored: the original bound all three functions to one mangled
    name and passed the wrong (undefined) argument to ``count_divisors``.
    """
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
68
1
import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class a__ ( snake_case ): """simple docstring""" __lowerCamelCase = (PNDMScheduler,) __lowerCamelCase = (('num_inference_steps', 50),) def UpperCamelCase ( self , **lowercase ) -> Tuple: '''simple docstring''' A__ = { "num_train_timesteps": 1000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**lowercase ) return config def UpperCamelCase ( self , lowercase=0 , **lowercase ) -> Any: '''simple docstring''' A__ = dict(self.forward_default_kwargs ) A__ = kwargs.pop("num_inference_steps" , lowercase ) A__ = self.dummy_sample A__ = 0.1 * sample A__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: A__ = self.get_scheduler_config(**lowercase ) A__ = scheduler_class(**lowercase ) scheduler.set_timesteps(lowercase ) # copy over dummy past residuals A__ = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase ) A__ = scheduler_class.from_pretrained(lowercase ) new_scheduler.set_timesteps(lowercase ) # copy over dummy past residuals A__ = dummy_past_residuals[:] A__ = scheduler.step_prk(lowercase , lowercase , lowercase , **lowercase ).prev_sample A__ = new_scheduler.step_prk(lowercase , lowercase , lowercase , **lowercase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" A__ = scheduler.step_plms(lowercase , lowercase , lowercase , **lowercase ).prev_sample A__ = new_scheduler.step_plms(lowercase , lowercase , lowercase , **lowercase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' pass def UpperCamelCase ( self , lowercase=0 , **lowercase ) -> Tuple: '''simple docstring''' A__ = dict(self.forward_default_kwargs ) A__ = 
kwargs.pop("num_inference_steps" , lowercase ) A__ = self.dummy_sample A__ = 0.1 * sample A__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: A__ = self.get_scheduler_config() A__ = scheduler_class(**lowercase ) scheduler.set_timesteps(lowercase ) # copy over dummy past residuals (must be after setting timesteps) A__ = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase ) A__ = scheduler_class.from_pretrained(lowercase ) # copy over dummy past residuals new_scheduler.set_timesteps(lowercase ) # copy over dummy past residual (must be after setting timesteps) A__ = dummy_past_residuals[:] A__ = scheduler.step_prk(lowercase , lowercase , lowercase , **lowercase ).prev_sample A__ = new_scheduler.step_prk(lowercase , lowercase , lowercase , **lowercase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" A__ = scheduler.step_plms(lowercase , lowercase , lowercase , **lowercase ).prev_sample A__ = new_scheduler.step_plms(lowercase , lowercase , lowercase , **lowercase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def UpperCamelCase ( self , **lowercase ) -> Dict: '''simple docstring''' A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(**lowercase ) A__ = scheduler_class(**lowercase ) A__ = 10 A__ = self.dummy_model() A__ = self.dummy_sample_deter scheduler.set_timesteps(lowercase ) for i, t in enumerate(scheduler.prk_timesteps ): A__ = model(lowercase , lowercase ) A__ = scheduler.step_prk(lowercase , lowercase , lowercase ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): A__ = model(lowercase , lowercase ) A__ = scheduler.step_plms(lowercase , lowercase , lowercase ).prev_sample return sample def UpperCamelCase ( self ) -> List[str]: '''simple docstring''' A__ = dict(self.forward_default_kwargs ) A__ = 
kwargs.pop("num_inference_steps" , lowercase ) for scheduler_class in self.scheduler_classes: A__ = self.get_scheduler_config() A__ = scheduler_class(**lowercase ) A__ = self.dummy_sample A__ = 0.1 * sample if num_inference_steps is not None and hasattr(lowercase , "set_timesteps" ): scheduler.set_timesteps(lowercase ) elif num_inference_steps is not None and not hasattr(lowercase , "set_timesteps" ): A__ = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) A__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] A__ = dummy_past_residuals[:] A__ = scheduler.step_prk(lowercase , 0 , lowercase , **lowercase ).prev_sample A__ = scheduler.step_prk(lowercase , 1 , lowercase , **lowercase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) A__ = scheduler.step_plms(lowercase , 0 , lowercase , **lowercase ).prev_sample A__ = scheduler.step_plms(lowercase , 1 , lowercase , **lowercase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def UpperCamelCase ( self ) -> Dict: '''simple docstring''' for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=lowercase ) def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' for steps_offset in [0, 1]: self.check_over_configs(steps_offset=lowercase ) A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(steps_offset=1 ) A__ = scheduler_class(**lowercase ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , ) def UpperCamelCase ( self ) -> Dict: '''simple docstring''' for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ): self.check_over_configs(beta_start=lowercase , beta_end=lowercase ) def UpperCamelCase ( self ) -> Optional[int]: '''simple docstring''' for schedule in 
["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowercase ) def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowercase ) def UpperCamelCase ( self ) -> Dict: '''simple docstring''' for t in [1, 5, 10]: self.check_over_forward(time_step=lowercase ) def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=lowercase ) def UpperCamelCase ( self ) -> List[str]: '''simple docstring''' A__ = 27 for scheduler_class in self.scheduler_classes: A__ = self.dummy_sample A__ = 0.1 * sample A__ = self.get_scheduler_config() A__ = scheduler_class(**lowercase ) scheduler.set_timesteps(lowercase ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): A__ = scheduler.step_prk(lowercase , lowercase , lowercase ).prev_sample def UpperCamelCase ( self ) -> Optional[int]: '''simple docstring''' with self.assertRaises(lowercase ): A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config() A__ = scheduler_class(**lowercase ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def UpperCamelCase ( self ) -> int: '''simple docstring''' A__ = self.full_loop() A__ = torch.sum(torch.abs(lowercase ) ) A__ = torch.mean(torch.abs(lowercase ) ) assert abs(result_sum.item() - 198.1318 ) < 1e-2 assert abs(result_mean.item() - 0.2580 ) < 1e-3 def UpperCamelCase ( self ) -> int: '''simple docstring''' A__ = self.full_loop(prediction_type="v_prediction" ) A__ = torch.sum(torch.abs(lowercase ) ) A__ = torch.mean(torch.abs(lowercase ) ) assert abs(result_sum.item() - 67.3986 ) < 1e-2 assert abs(result_mean.item() - 0.0878 ) < 1e-3 def UpperCamelCase ( self ) -> int: '''simple docstring''' A__ = self.full_loop(set_alpha_to_one=lowercase , beta_start=0.01 ) 
A__ = torch.sum(torch.abs(lowercase ) ) A__ = torch.mean(torch.abs(lowercase ) ) assert abs(result_sum.item() - 230.0399 ) < 1e-2 assert abs(result_mean.item() - 0.2995 ) < 1e-3 def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ = self.full_loop(set_alpha_to_one=lowercase , beta_start=0.01 ) A__ = torch.sum(torch.abs(lowercase ) ) A__ = torch.mean(torch.abs(lowercase ) ) assert abs(result_sum.item() - 186.9482 ) < 1e-2 assert abs(result_mean.item() - 0.2434 ) < 1e-3
68
import io
import json
import unittest

from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu


# Names restored: the body references `filename` and `bleu_data`, but the
# original bound both to one mangled module-level name (NameError).
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class a__(unittest.TestCase):
    """Validation-set BLEU regression tests for the FSMT WMT19 checkpoints."""

    def get_tokenizer(self, mname):
        """Load the FSMT tokenizer for checkpoint *mname*."""
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        """Load the FSMT model on the test device (fp16 on CUDA)."""
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        """Translate the validation set and check BLEU >= the recorded floor."""
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        # NOTE(review): boolean kwargs below were mangled in the original;
        # True/False chosen to match the upstream transformers test — confirm.
        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
68
1
import gc
import unittest

from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class a__(unittest.TestCase):
    """Integration test for the Flax Stable Diffusion inpainting pipeline.

    NOTE(review): the original defined both methods under one shadowing
    name, hiding the ``tearDown`` override; names and mangled boolean/None
    arguments were reconstructed — confirm against the upstream test file.
    """

    def tearDown(self):
        # free host/accelerator memory between tests
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        # replicate the inputs once per device for pmapped execution
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(
            prompt, init_image, mask_image
        )

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
        )

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.361_1307, 0.3764_9736, 0.375_7408, 0.3821_3953, 0.3929_5167, 0.384_1631, 0.4155_4978, 0.413_7475, 0.421_7084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
68
def hamming(n_element: int) -> list:
    """Return the first `n_element` Hamming numbers.

    Hamming (regular) numbers are of the form 2^i * 3^j * 5^k; the list is
    produced in ascending order starting from 1.

    Raises:
        ValueError: if `n_element` is smaller than 1.
    """
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    # i, j, k index the smallest element whose multiple by 2, 3, 5
    # respectively has not yet been emitted
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        # the next Hamming number is the smallest candidate multiple
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list


# Backward-compatible alias for the previous (minified) public name.
lowerCAmelCase__ = hamming

if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"""The list with nth numbers is: {hamming_numbers}""")
    print("-----------------------------------------------------")
68
1
import os
from typing import Dict, List, Tuple, TypeVar, Union


# Generic type variable shared by the aliases below.
T = TypeVar("T")

# A homogeneous list or tuple of T.
ListLike = Union[List[T], Tuple[T, ...]]

# A bare value, a list of values, or a string-keyed dict of values.
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]

# Anything accepted as a filesystem path.
PathLike = Union[str, bytes, os.PathLike]

# Backward-compatible alias for the previous (minified) public name,
# which ended up bound to the last assignment above.
lowerCAmelCase__ = PathLike
68
import copy
import random

from transformers import CLIPTokenizer


class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """CLIP tokenizer that maps one placeholder token to several learned tokens
    (multi-vector textual inversion)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # placeholder token -> list of concrete tokens it expands to
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        """Add `placeholder_token` to the vocabulary; fail loudly on duplicates."""
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                F'The tokenizer already contains the token {placeholder_token}. Please pass a different'
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        """Register `placeholder_token`, backed by `num_vec_per_token` real tokens."""
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + F'_{i}'
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    F'The tokenizer already has placeholder token {token} that can get confused with'
                    F' {placeholder_token}keep placeholder tokens independent'
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        """Expand every registered placeholder in `text` (or list of texts) into
        its concrete tokens, optionally shuffling / truncating the expansion."""
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                # keep at least one token; prop_tokens_to_load trims the tail
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
68
1
from typing import List

from .keymap import KEYMAP, get_character


def mark(key: str):
    """Mark the function with the key code so it can be handled in the register."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """Mark the function with the key codes so it can be handled in the register."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    """Metaclass that collects methods marked with key codes into a
    `key_handler` dispatch table and attaches a shared `handle_input`."""

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Read one key press and dispatch to its registered handler, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Re-create `cls` with the KeyHandler metaclass applied."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
68
from collections import deque
from math import floor
from random import random
from time import time


class DirectedGraph:
    """Directed graph stored as an adjacency list; each edge is a
    [weight, destination] pair."""

    def __init__(self):
        self.graph = {}

    def add_pair(self, u, v, w=1):
        """Add edge u -> v with weight w (no-op if that exact edge exists)."""
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        # make sure the destination vertex exists, even with no outgoing edges
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    def remove_pair(self, u, v):
        """Remove every u -> v edge, regardless of weight."""
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)

    def dfs(self, s=-2, d=-1):
        """Iterative depth-first traversal from s (default: first vertex);
        stops early and returns the path so far if destination d is reached."""
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(node[1])
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        """Populate the graph with c random vertices (random size if c == -1)."""
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first traversal from s (default: first vertex)."""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        """Count edges pointing into u."""
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        """DFS-based ordering; nodes are appended as they are popped."""
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        """Return the vertices found on back edges during a DFS sweep."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """True as soon as a back edge is detected, False when DFS exhausts."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        """Wall-clock seconds spent in dfs(s, e)."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Wall-clock seconds spent in bfs(s)."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin


class Graph:
    """Undirected graph stored as an adjacency list; every edge is mirrored
    in both endpoints' lists as a [weight, other-endpoint] pair."""

    def __init__(self):
        self.graph = {}

    def add_pair(self, u, v, w=1):
        """Add undirected edge u <-> v with weight w."""
        # check if the u exists
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is a edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if u does not exist
            self.graph[v] = [[w, u]]

    def remove_pair(self, u, v):
        """Remove edge u <-> v from both adjacency lists."""
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)

    def dfs(self, s=-2, d=-1):
        """Iterative DFS from s; stops early if destination d is reached."""
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(node[1])
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        """Populate the graph with c random vertices (random size if c == -1)."""
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first traversal from s (default: first vertex)."""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])

    def cycle_nodes(self):
        """Return the vertices found on back edges during a DFS sweep."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """True as soon as a back edge is detected, False when DFS exhausts."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        """Wall-clock seconds spent in dfs(s, e)."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Wall-clock seconds spent in bfs(s)."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
68
1
from typing import List, Optional

from tokenizers import ByteLevelBPETokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}


class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """Fast (byte-level BPE) tokenizer for BlenderbotSmall."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None):
        """Wrap a sequence (or pair) with BOS/EOS special tokens."""
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a_a is None:
            return output

        return output + [self.eos_token_id] + token_ids_a_a + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None
    ) -> List[int]:
        """BlenderbotSmall does not use token type ids: return all zeros with
        the same length the combined special-token sequence would have."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_a + sep) * [0]
68
import datasets

from .evaluate import evaluate


_CITATION = """\
@article{hendrycks2021cuad,
    title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
    author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
    journal={arXiv preprint arXiv:2103.06268},
    year={2021}
}
"""

_DESCRIPTION = """
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510 commercial legal contracts that have been manually labeled to identify 41 categories of important clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""

_KWARGS_DESCRIPTION = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
    predictions: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair as given in the references (see below)
        - 'prediction_text': list of possible texts for the answer, as a list of strings
        depending on a threshold on the confidence probability of each prediction.
    references: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair (see above),
        - 'answers': a Dict in the CUAD dataset format
            {
                'text': list of possible texts for the answer, as a list of strings
                'answer_start': list of start positions for the answer, as a list of ints
            }
            Note that answer_start values are not taken into account to compute the metric.
Returns:
    'exact_match': Exact match (the normalized answer exactly match the gold answer)
    'f1': The F-score of predicted tokens versus the gold answer
    'aupr': Area Under the Precision-Recall curve
    'prec_at_80_recall': Precision at 80% recall
    'prec_at_90_recall': Precision at 90% recall
Examples:
    >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> cuad_metric = datasets.load_metric(\"cuad\")
    >>> results = cuad_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CUAD(datasets.Metric):
    """Wrapper around the official CUAD v1 scoring script."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        # the official script expects {id: prediction_texts} plus a SQuAD-style
        # nested dataset built from the references
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
68
1
import logging
import os
from typing import List, TextIO, Union

from conllu import parse_incr

from utils_ner import InputExample, Split, TokenClassificationTask


logger = logging.getLogger(__name__)


class NER(TokenClassificationTask):
    """CoNLL-style named-entity-recognition task: one token per line, label in
    column `label_idx` (default: last column)."""

    def __init__(self, label_idx=-1):
        # column index holding the label on each CoNLL line
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, F'{mode}.txt')
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    # blank line / doc marker closes the current sentence
                    if words:
                        examples.append(InputExample(guid=F'{mode}-{guid_index}', words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=F'{mode}-{guid_index}', words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        """Echo the test file, appending the predicted label to each token line."""
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]


class Chunk(NER):
    """Chunking task: same file format as NER but the label lives in the
    second-to-last column."""

    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]


class POS(TokenClassificationTask):
    """Part-of-speech tagging task over CoNLL-U files (parsed with `conllu`)."""

    def read_examples_from_file(self, data_dir, mode) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, F'{mode}.txt')
        guid_index = 1
        examples = []

        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=F'{mode}-{guid_index}', words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        """Write `form (upos|prediction)` for every token of every sentence."""
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += F'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            # default universal POS tag set
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
68
import torch
import torch.nn as nn

from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel

from ...utils import logging


logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    """Cosine similarity matrix between two embedding batches."""
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


class StableDiffusionSafetyChecker(PreTrainedModel):
    """CLIP-based NSFW classifier used to filter Stable Diffusion outputs."""

    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        # fixed concept/special-care embeddings and their score thresholds;
        # frozen (requires_grad=False) — loaded from the checkpoint, not trained
        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        """Score each image against the concept embeddings and flag NSFW ones."""
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    # special-care hit makes the subsequent concept checks stricter
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input, images):
        """Vectorized, branch-free variant of `forward` suitable for ONNX export."""
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
68
1
"""Flax Auto-Model classes: config-type -> model-class name mappings plus the
`FlaxAutoModel*` factory classes built on top of them."""
from collections import OrderedDict

from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES


logger = logging.get_logger(__name__)

# NOTE(fix): every mapping below was assigned to a single throwaway name while
# later code referenced `FLAX_MODEL_*_MAPPING_NAMES`; the real names are
# restored from those references so the module actually imports.
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
    [
        # Base model mapping
        ("albert", "FlaxAlbertModel"),
        ("bart", "FlaxBartModel"),
        ("beit", "FlaxBeitModel"),
        ("bert", "FlaxBertModel"),
        ("big_bird", "FlaxBigBirdModel"),
        ("blenderbot", "FlaxBlenderbotModel"),
        ("blenderbot-small", "FlaxBlenderbotSmallModel"),
        ("clip", "FlaxCLIPModel"),
        ("distilbert", "FlaxDistilBertModel"),
        ("electra", "FlaxElectraModel"),
        ("gpt-sw3", "FlaxGPT2Model"),
        ("gpt2", "FlaxGPT2Model"),
        ("gpt_neo", "FlaxGPTNeoModel"),
        ("gptj", "FlaxGPTJModel"),
        ("longt5", "FlaxLongT5Model"),
        ("marian", "FlaxMarianModel"),
        ("mbart", "FlaxMBartModel"),
        ("mt5", "FlaxMT5Model"),
        ("opt", "FlaxOPTModel"),
        ("pegasus", "FlaxPegasusModel"),
        ("regnet", "FlaxRegNetModel"),
        ("resnet", "FlaxResNetModel"),
        ("roberta", "FlaxRobertaModel"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
        ("roformer", "FlaxRoFormerModel"),
        ("t5", "FlaxT5Model"),
        ("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
        ("vit", "FlaxViTModel"),
        ("wav2vec2", "FlaxWav2Vec2Model"),
        ("whisper", "FlaxWhisperModel"),
        ("xglm", "FlaxXGLMModel"),
        ("xlm-roberta", "FlaxXLMRobertaModel"),
    ]
)

FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
    [
        # Model for pre-training mapping
        ("albert", "FlaxAlbertForPreTraining"),
        ("bart", "FlaxBartForConditionalGeneration"),
        ("bert", "FlaxBertForPreTraining"),
        ("big_bird", "FlaxBigBirdForPreTraining"),
        ("electra", "FlaxElectraForPreTraining"),
        ("longt5", "FlaxLongT5ForConditionalGeneration"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("mt5", "FlaxMT5ForConditionalGeneration"),
        ("roberta", "FlaxRobertaForMaskedLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
        ("roformer", "FlaxRoFormerForMaskedLM"),
        ("t5", "FlaxT5ForConditionalGeneration"),
        ("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
        ("whisper", "FlaxWhisperForConditionalGeneration"),
        ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
    ]
)

FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Masked LM mapping
        ("albert", "FlaxAlbertForMaskedLM"),
        ("bart", "FlaxBartForConditionalGeneration"),
        ("bert", "FlaxBertForMaskedLM"),
        ("big_bird", "FlaxBigBirdForMaskedLM"),
        ("distilbert", "FlaxDistilBertForMaskedLM"),
        ("electra", "FlaxElectraForMaskedLM"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("roberta", "FlaxRobertaForMaskedLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
        ("roformer", "FlaxRoFormerForMaskedLM"),
        ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
    ]
)

FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Seq2Seq Causal LM mapping
        ("bart", "FlaxBartForConditionalGeneration"),
        ("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
        ("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
        ("encoder-decoder", "FlaxEncoderDecoderModel"),
        ("longt5", "FlaxLongT5ForConditionalGeneration"),
        ("marian", "FlaxMarianMTModel"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("mt5", "FlaxMT5ForConditionalGeneration"),
        ("pegasus", "FlaxPegasusForConditionalGeneration"),
        ("t5", "FlaxT5ForConditionalGeneration"),
    ]
)

FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image-classsification
        ("beit", "FlaxBeitForImageClassification"),
        ("regnet", "FlaxRegNetForImageClassification"),
        ("resnet", "FlaxResNetForImageClassification"),
        ("vit", "FlaxViTForImageClassification"),
    ]
)

FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
    ]
)

FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Causal LM mapping
        ("bart", "FlaxBartForCausalLM"),
        ("bert", "FlaxBertForCausalLM"),
        ("big_bird", "FlaxBigBirdForCausalLM"),
        ("electra", "FlaxElectraForCausalLM"),
        ("gpt-sw3", "FlaxGPT2LMHeadModel"),
        ("gpt2", "FlaxGPT2LMHeadModel"),
        ("gpt_neo", "FlaxGPTNeoForCausalLM"),
        ("gptj", "FlaxGPTJForCausalLM"),
        ("opt", "FlaxOPTForCausalLM"),
        ("roberta", "FlaxRobertaForCausalLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
        ("xglm", "FlaxXGLMForCausalLM"),
        ("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
    ]
)

FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Sequence Classification mapping
        ("albert", "FlaxAlbertForSequenceClassification"),
        ("bart", "FlaxBartForSequenceClassification"),
        ("bert", "FlaxBertForSequenceClassification"),
        ("big_bird", "FlaxBigBirdForSequenceClassification"),
        ("distilbert", "FlaxDistilBertForSequenceClassification"),
        ("electra", "FlaxElectraForSequenceClassification"),
        ("mbart", "FlaxMBartForSequenceClassification"),
        ("roberta", "FlaxRobertaForSequenceClassification"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
        ("roformer", "FlaxRoFormerForSequenceClassification"),
        ("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
    ]
)

FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
    [
        # Model for Question Answering mapping
        ("albert", "FlaxAlbertForQuestionAnswering"),
        ("bart", "FlaxBartForQuestionAnswering"),
        ("bert", "FlaxBertForQuestionAnswering"),
        ("big_bird", "FlaxBigBirdForQuestionAnswering"),
        ("distilbert", "FlaxDistilBertForQuestionAnswering"),
        ("electra", "FlaxElectraForQuestionAnswering"),
        ("mbart", "FlaxMBartForQuestionAnswering"),
        ("roberta", "FlaxRobertaForQuestionAnswering"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
        ("roformer", "FlaxRoFormerForQuestionAnswering"),
        ("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
    ]
)

FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Token Classification mapping
        ("albert", "FlaxAlbertForTokenClassification"),
        ("bert", "FlaxBertForTokenClassification"),
        ("big_bird", "FlaxBigBirdForTokenClassification"),
        ("distilbert", "FlaxDistilBertForTokenClassification"),
        ("electra", "FlaxElectraForTokenClassification"),
        ("roberta", "FlaxRobertaForTokenClassification"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
        ("roformer", "FlaxRoFormerForTokenClassification"),
        ("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
    ]
)

FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
    [
        # Model for Multiple Choice mapping
        ("albert", "FlaxAlbertForMultipleChoice"),
        ("bert", "FlaxBertForMultipleChoice"),
        ("big_bird", "FlaxBigBirdForMultipleChoice"),
        ("distilbert", "FlaxDistilBertForMultipleChoice"),
        ("electra", "FlaxElectraForMultipleChoice"),
        ("roberta", "FlaxRobertaForMultipleChoice"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
        ("roformer", "FlaxRoFormerForMultipleChoice"),
        ("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
    ]
)

FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
    [
        ("bert", "FlaxBertForNextSentencePrediction"),
    ]
)

FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
        ("whisper", "FlaxWhisperForConditionalGeneration"),
    ]
)

FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        ("whisper", "FlaxWhisperForAudioClassification"),
    ]
)

# Lazy mappings resolve a config class to the model class named above only
# when first accessed, so importing this module stays cheap.
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)


# Each Auto class binds one mapping via the `_model_mapping` attribute that
# `_BaseAutoModelClass` dispatches on, then `auto_class_update` fills in the
# docstrings (and rebinds the name to the updated class).
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeqaSeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeqaSeqLM = auto_class_update(
    FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVisionaSeq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVisionaSeq = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeqaSeq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeqaSeq = auto_class_update(
    FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling"
)
68
from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, 
wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
68
1
"""Tests for the ChineseCLIP image processor (3- and 4-channel inputs)."""
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor


class ChineseCLIPImageProcessingTester(unittest.TestCase):
    """Helper that holds image-processor kwargs and builds random test images."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        # Bug fix: these were assigned to a throwaway local instead of `self`,
        # so every attribute read below raised AttributeError.
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        """Kwargs dict used to instantiate the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Create a batch of random images as PIL images, numpy arrays, or torch tensors.

        When `equal_resolution` is False each image gets a random
        (width, height) in [min_resolution, max_resolution).
        """
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs


@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )


@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        # RGBA inputs are converted to RGB by the processor, so 3 channels out.
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
68
import string def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str ) -> None: '''simple docstring''' for key in range(len(string.ascii_uppercase ) ): A__ = "" for symbol in message: if symbol in string.ascii_uppercase: A__ = string.ascii_uppercase.find(SCREAMING_SNAKE_CASE_ ) A__ = num - key if num < 0: A__ = num + len(string.ascii_uppercase ) A__ = translated + string.ascii_uppercase[num] else: A__ = translated + symbol print(F'Decryption using Key #{key}: {translated}' ) def lowerCAmelCase__ ( ) -> None: '''simple docstring''' A__ = input("Encrypted message: " ) A__ = message.upper() decrypt(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": import doctest doctest.testmod() main()
68
1
from math import factorial lowerCAmelCase__ = {str(d): factorial(d) for d in range(1_0)} def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int ) -> int: '''simple docstring''' return sum(DIGIT_FACTORIAL[d] for d in str(SCREAMING_SNAKE_CASE_ ) ) def lowerCAmelCase__ ( ) -> int: '''simple docstring''' A__ = 7 * factorial(9 ) + 1 return sum(i for i in range(3 , SCREAMING_SNAKE_CASE_ ) if sum_of_digit_factorial(SCREAMING_SNAKE_CASE_ ) == i ) if __name__ == "__main__": print(f"""{solution() = }""")
68
import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ = get_tests_dir("""fixtures/test_sentencepiece_bpe_char.model""") @require_sentencepiece @require_tokenizers class a__ ( snake_case , unittest.TestCase ): """simple docstring""" __lowerCamelCase = SpeechTaTokenizer __lowerCamelCase = False __lowerCamelCase = True def UpperCamelCase ( self ) -> Any: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing A__ = SpeechTaTokenizer(lowercase ) A__ = AddedToken("<mask>" , lstrip=lowercase , rstrip=lowercase ) A__ = mask_token tokenizer.add_special_tokens({"mask_token": mask_token} ) tokenizer.add_tokens(["<ctc_blank>"] ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase ( self , lowercase ) -> Union[str, Any]: '''simple docstring''' A__ = "this is a test" A__ = "this is a test" return input_text, output_text def UpperCamelCase ( self , lowercase , lowercase=False , lowercase=20 , lowercase=5 ) -> Optional[Any]: '''simple docstring''' A__ , A__ = self.get_input_output_texts(lowercase ) A__ = tokenizer.encode(lowercase , add_special_tokens=lowercase ) A__ = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase ) return text, ids def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' A__ = "<pad>" A__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase ) , lowercase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase ) , lowercase ) def UpperCamelCase ( self ) -> List[str]: '''simple docstring''' A__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-4] , "œ" 
) self.assertEqual(vocab_keys[-2] , "<mask>" ) self.assertEqual(vocab_keys[-1] , "<ctc_blank>" ) self.assertEqual(len(lowercase ) , 81 ) def UpperCamelCase ( self ) -> Dict: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def UpperCamelCase ( self ) -> Optional[int]: '''simple docstring''' A__ = self.get_tokenizers(do_lower_case=lowercase ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): A__ = tokenizer.vocab_size A__ = len(lowercase ) self.assertNotEqual(lowercase , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) A__ = ["aaaaa bbbbbb", "cccccccccdddddddd"] A__ = tokenizer.add_tokens(lowercase ) A__ = tokenizer.vocab_size A__ = len(lowercase ) self.assertNotEqual(lowercase , 0 ) self.assertEqual(lowercase , lowercase ) self.assertEqual(lowercase , len(lowercase ) ) self.assertEqual(lowercase , all_size + len(lowercase ) ) A__ = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=lowercase ) self.assertGreaterEqual(len(lowercase ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) A__ = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"} A__ = tokenizer.add_special_tokens(lowercase ) A__ = tokenizer.vocab_size A__ = len(lowercase ) self.assertNotEqual(lowercase , 0 ) self.assertEqual(lowercase , lowercase ) self.assertEqual(lowercase , len(lowercase ) ) self.assertEqual(lowercase , all_size_a + len(lowercase ) ) A__ = tokenizer.encode( ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=lowercase ) self.assertGreaterEqual(len(lowercase ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) 
self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' pass def UpperCamelCase ( self ) -> Any: '''simple docstring''' pass def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' A__ = self.get_tokenizer() A__ = tokenizer.tokenize("This is a test" ) # fmt: off self.assertListEqual(lowercase , [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(lowercase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) A__ = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( lowercase , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] ) A__ = tokenizer.convert_tokens_to_ids(lowercase ) # fmt: off self.assertListEqual(lowercase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on A__ = tokenizer.convert_ids_to_tokens(lowercase ) self.assertListEqual( lowercase , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] ) @slow def UpperCamelCase ( self ) -> int: '''simple docstring''' A__ = [ "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides " "general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, 
XLNet...) for Natural " "Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained " "models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.", "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly " "conditioning on both left and right context in all layers.", "The quick brown fox jumps over the lazy dog.", ] # fmt: off A__ = { "input_ids": [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 
15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], "attention_mask": [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase , model_name="microsoft/speecht5_asr" , revision="c5ef64c71905caeccde0e4462ef3f9077224c524" , sequences=lowercase , )
68
1
import os
import zipfile

import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links


def get_daily_ci_runs(token, num_runs=7):
    """Return the most recent ``num_runs`` scheduled runs of the daily CI workflow.

    Queries the GitHub Actions API for workflow runs on the ``main`` branch that
    were triggered by ``schedule`` (i.e. not pull requests).

    :param token: GitHub token, or None for unauthenticated requests.
    :param num_runs: how many runs to fetch (newest first).
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run).
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Return the id of the most recent *completed* daily CI workflow run, or None."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        # Runs are returned newest first, so the first completed one is the latest.
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the named artifacts of the last completed daily CI run into ``output_dir``.

    Artifact names not present in the run are silently skipped (best-effort).
    """
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # NOTE: `worflow_run_id` (sic) is the keyword expected by the helper.
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download the last daily CI artifacts and return their textual contents.

    :return: mapping ``{artifact_name: {file_name_inside_zip: decoded_text}}``;
        artifacts whose zip was not downloaded are omitted.
    """
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
68
# Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: int ) -> List[str]: '''simple docstring''' A__ = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } # BLUE scores as follows: # "pair": [fairseq, transformers] A__ = { "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"], "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"], "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"], "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"], } A__ = F'{src_lang}-{tgt_lang}' A__ = F'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = 
"{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation 
info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n' os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) A__ = os.path.join(SCREAMING_SNAKE_CASE_ , "README.md" ) print(F'Generating {path}' ) with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" ) as f: f.write(SCREAMING_SNAKE_CASE_ ) # make sure we are under the root of the project lowerCAmelCase__ = Path(__file__).resolve().parent.parent.parent lowerCAmelCase__ = repo_dir / """model_cards""" for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = model_name.split("""-""") lowerCAmelCase__ = model_cards_dir / """facebook""" / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
68
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    """Configuration for Open-Llama models.

    Stores the hyper-parameters used to instantiate an Open-Llama model and
    validates the optional ``rope_scaling`` dict on construction.
    """

    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # NOTE: the serialized config key carries the historical "memorry" typo;
        # it is kept byte-for-byte for checkpoint compatibility.
        self.use_memorry_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate ``self.rope_scaling``: None, or a 2-key dict with
        ``type`` in {"linear", "dynamic"} and a float ``factor`` > 1."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
68
from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy lowerCAmelCase__ = logging.get_logger(__name__) class a__ ( snake_case ): """simple docstring""" def __init__( self , lowercase , lowercase , lowercase , **lowercase ) -> Union[str, Any]: '''simple docstring''' A__ = feature_size A__ = sampling_rate A__ = padding_value A__ = kwargs.pop("padding_side" , "right" ) A__ = kwargs.pop("return_attention_mask" , lowercase ) super().__init__(**lowercase ) def UpperCamelCase ( self , lowercase , lowercase = True , lowercase = None , lowercase = False , lowercase = None , lowercase = None , lowercase = None , ) -> BatchFeature: '''simple docstring''' if isinstance(lowercase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ): A__ = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`" F' to this method that includes {self.model_input_names[0]}, but you provided' F' {list(processed_features.keys() )}' ) A__ = processed_features[self.model_input_names[0]] A__ = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(lowercase ) == 0: if return_attention_mask: A__ = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch A__ = required_input[0] if isinstance(lowercase , (list, tuple) ): # first_element might be an empty list/tuple in some 
edge cases so we grab the first non empty element. A__ = 0 while len(required_input[index] ) == 0: index += 1 if index < len(lowercase ): A__ = required_input[index][0] if return_tensors is None: if is_tf_tensor(lowercase ): A__ = "tf" elif is_torch_tensor(lowercase ): A__ = "pt" elif isinstance(lowercase , (int, float, list, tuple, np.ndarray) ): A__ = "np" else: raise ValueError( F'type of {first_element} unknown: {type(lowercase )}. ' "Should be one of a python, numpy, pytorch or tensorflow object." ) for key, value in processed_features.items(): if isinstance(value[0] , (int, float) ): A__ = to_numpy(lowercase ) else: A__ = [to_numpy(lowercase ) for v in value] # Convert padding_strategy in PaddingStrategy A__ = self._get_padding_strategies(padding=lowercase , max_length=lowercase ) A__ = processed_features[self.model_input_names[0]] A__ = len(lowercase ) if not all(len(lowercase ) == batch_size for v in processed_features.values() ): raise ValueError("Some items in the output dictionary have a different batch size than others." 
) A__ = [] for i in range(lowercase ): A__ = {k: v[i] for k, v in processed_features.items()} # truncation A__ = self._truncate( lowercase , max_length=lowercase , pad_to_multiple_of=lowercase , truncation=lowercase , ) truncated_inputs.append(lowercase ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length A__ = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) A__ = PaddingStrategy.MAX_LENGTH A__ = {} for i in range(lowercase ): # padding A__ = self._pad( truncated_inputs[i] , max_length=lowercase , padding_strategy=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , ) for key, value in outputs.items(): if key not in batch_outputs: A__ = [] if value.dtype is np.dtype(np.floataa ): A__ = value.astype(np.floataa ) batch_outputs[key].append(lowercase ) return BatchFeature(lowercase , tensor_type=lowercase ) def UpperCamelCase ( self , lowercase , lowercase = None , lowercase = PaddingStrategy.DO_NOT_PAD , lowercase = None , lowercase = None , ) -> dict: '''simple docstring''' A__ = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: A__ = len(lowercase ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): A__ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of A__ = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowercase ) < max_length if return_attention_mask and "attention_mask" not in processed_features: A__ = np.ones(len(lowercase ) , dtype=np.intaa ) if needs_to_be_padded: A__ = max_length - len(lowercase ) if self.padding_side == "right": if return_attention_mask: A__ = np.pad( processed_features["attention_mask"] , (0, difference) ) A__ = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) A__ = np.pad( lowercase , lowercase , "constant" , constant_values=self.padding_value ) elif 
self.padding_side == "left": if return_attention_mask: A__ = np.pad( processed_features["attention_mask"] , (difference, 0) ) A__ = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) A__ = np.pad( lowercase , lowercase , "constant" , constant_values=self.padding_value ) else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return processed_features def UpperCamelCase ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , ) -> Union[str, Any]: '''simple docstring''' if not truncation: return processed_features elif truncation and max_length is None: raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." ) A__ = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): A__ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of A__ = len(lowercase ) > max_length if needs_to_be_truncated: A__ = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: A__ = processed_features["attention_mask"][:max_length] return processed_features def UpperCamelCase ( self , lowercase=False , lowercase=None ) -> Any: '''simple docstring''' if padding is not False: if padding is True: A__ = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(lowercase , lowercase ): A__ = PaddingStrategy(lowercase ) elif isinstance(lowercase , lowercase ): A__ = padding else: A__ = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( F'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined' ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( 
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use" " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." ) return padding_strategy
68
1
import argparse import os import re lowerCAmelCase__ = """src/diffusers""" # Pattern that looks at the indentation in a line. lowerCAmelCase__ = re.compile(R"""^(\s*)\S""") # Pattern that matches `"key":" and puts `key` in group 0. lowerCAmelCase__ = re.compile(R"""^\s*\"([^\"]+)\":""") # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. lowerCAmelCase__ = re.compile(R"""^\s*_import_structure\[\"([^\"]+)\"\]""") # Pattern that matches `"key",` and puts `key` in group 0. lowerCAmelCase__ = re.compile(R"""^\s*\"([^\"]+)\",\s*$""") # Pattern that matches any `[stuff]` and puts `stuff` in group 0. lowerCAmelCase__ = re.compile(R"""\[([^\]]+)\]""") def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[Any] ) -> List[str]: '''simple docstring''' A__ = _re_indent.search(SCREAMING_SNAKE_CASE_ ) return "" if search is None else search.groups()[0] def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int , SCREAMING_SNAKE_CASE_: Dict="" , SCREAMING_SNAKE_CASE_: List[str]=None , SCREAMING_SNAKE_CASE_: List[str]=None ) -> int: '''simple docstring''' A__ = 0 A__ = code.split("\n" ) if start_prompt is not None: while not lines[index].startswith(SCREAMING_SNAKE_CASE_ ): index += 1 A__ = ["\n".join(lines[:index] )] else: A__ = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). 
A__ = [lines[index]] index += 1 while index < len(SCREAMING_SNAKE_CASE_ ) and (end_prompt is None or not lines[index].startswith(SCREAMING_SNAKE_CASE_ )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(SCREAMING_SNAKE_CASE_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ): current_block.append(lines[index] ) blocks.append("\n".join(SCREAMING_SNAKE_CASE_ ) ) if index < len(SCREAMING_SNAKE_CASE_ ) - 1: A__ = [lines[index + 1]] index += 1 else: A__ = [] else: blocks.append("\n".join(SCREAMING_SNAKE_CASE_ ) ) A__ = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(SCREAMING_SNAKE_CASE_ ) > 0: blocks.append("\n".join(SCREAMING_SNAKE_CASE_ ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(SCREAMING_SNAKE_CASE_ ): blocks.append("\n".join(lines[index:] ) ) return blocks def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[str, Any] ) -> int: '''simple docstring''' def _inner(SCREAMING_SNAKE_CASE_: List[Any] ): return key(SCREAMING_SNAKE_CASE_ ).lower().replace("_" , "" ) return _inner def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int , SCREAMING_SNAKE_CASE_: Tuple=None ) -> int: '''simple docstring''' def noop(SCREAMING_SNAKE_CASE_: str ): return x if key is None: A__ = noop # Constants are all uppercase, they go first. A__ = [obj for obj in objects if key(SCREAMING_SNAKE_CASE_ ).isupper()] # Classes are not all uppercase but start with a capital, they go second. A__ = [obj for obj in objects if key(SCREAMING_SNAKE_CASE_ )[0].isupper() and not key(SCREAMING_SNAKE_CASE_ ).isupper()] # Functions begin with a lowercase, they go last. 
A__ = [obj for obj in objects if not key(SCREAMING_SNAKE_CASE_ )[0].isupper()] A__ = ignore_underscore(SCREAMING_SNAKE_CASE_ ) return sorted(SCREAMING_SNAKE_CASE_ , key=SCREAMING_SNAKE_CASE_ ) + sorted(SCREAMING_SNAKE_CASE_ , key=SCREAMING_SNAKE_CASE_ ) + sorted(SCREAMING_SNAKE_CASE_ , key=SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Any ) -> str: '''simple docstring''' def _replace(SCREAMING_SNAKE_CASE_: Dict ): A__ = match.groups()[0] if "," not in imports: return F'[{imports}]' A__ = [part.strip().replace("\"" , "" ) for part in imports.split("," )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: A__ = keys[:-1] return "[" + ", ".join([F'"{k}"' for k in sort_objects(SCREAMING_SNAKE_CASE_ )] ) + "]" A__ = import_statement.split("\n" ) if len(SCREAMING_SNAKE_CASE_ ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. A__ = 2 if lines[1].strip() == "[" else 1 A__ = [(i, _re_strip_line.search(SCREAMING_SNAKE_CASE_ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] A__ = sort_objects(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : x[1] ) A__ = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(SCREAMING_SNAKE_CASE_ ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: A__ = _re_bracket_content.sub(_replace , lines[1] ) else: A__ = [part.strip().replace("\"" , "" ) for part in lines[1].split("," )] # We will have a final empty element if the line finished with a comma. 
if len(keys[-1] ) == 0: A__ = keys[:-1] A__ = get_indent(lines[1] ) + ", ".join([F'"{k}"' for k in sort_objects(SCREAMING_SNAKE_CASE_ )] ) return "\n".join(SCREAMING_SNAKE_CASE_ ) else: # Finally we have to deal with imports fitting on one line A__ = _re_bracket_content.sub(_replace , SCREAMING_SNAKE_CASE_ ) return import_statement def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str , SCREAMING_SNAKE_CASE_: Optional[Any]=True ) -> List[Any]: '''simple docstring''' with open(SCREAMING_SNAKE_CASE_ , "r" ) as f: A__ = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 A__ = split_code_in_indented_blocks( SCREAMING_SNAKE_CASE_ , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:" ) # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(SCREAMING_SNAKE_CASE_ ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. A__ = main_blocks[block_idx] A__ = block.split("\n" ) # Get to the start of the imports. A__ = 0 while line_idx < len(SCREAMING_SNAKE_CASE_ ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: A__ = len(SCREAMING_SNAKE_CASE_ ) else: line_idx += 1 if line_idx >= len(SCREAMING_SNAKE_CASE_ ): continue # Ignore beginning and last line: they don't contain anything. A__ = "\n".join(block_lines[line_idx:-1] ) A__ = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. A__ = split_code_in_indented_blocks(SCREAMING_SNAKE_CASE_ , indent_level=SCREAMING_SNAKE_CASE_ ) # We have two categories of import key: list or _import_structure[key].append/extend A__ = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. 
A__ = [(pattern.search(SCREAMING_SNAKE_CASE_ ).groups()[0] if pattern.search(SCREAMING_SNAKE_CASE_ ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. A__ = [(i, key) for i, key in enumerate(SCREAMING_SNAKE_CASE_ ) if key is not None] A__ = [x[0] for x in sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. A__ = 0 A__ = [] for i in range(len(SCREAMING_SNAKE_CASE_ ) ): if keys[i] is None: reordered_blocks.append(internal_blocks[i] ) else: A__ = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reordered_blocks.append(SCREAMING_SNAKE_CASE_ ) count += 1 # And we put our main block back together with its first and last line. A__ = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] ) if code != "\n".join(SCREAMING_SNAKE_CASE_ ): if check_only: return True else: print(F'Overwriting {file}.' ) with open(SCREAMING_SNAKE_CASE_ , "w" ) as f: f.write("\n".join(SCREAMING_SNAKE_CASE_ ) ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str]=True ) -> Optional[Any]: '''simple docstring''' A__ = [] for root, _, files in os.walk(SCREAMING_SNAKE_CASE_ ): if "__init__.py" in files: A__ = sort_imports(os.path.join(SCREAMING_SNAKE_CASE_ , "__init__.py" ) , check_only=SCREAMING_SNAKE_CASE_ ) if result: A__ = [os.path.join(SCREAMING_SNAKE_CASE_ , "__init__.py" )] if len(SCREAMING_SNAKE_CASE_ ) > 0: raise ValueError(F'Would overwrite {len(SCREAMING_SNAKE_CASE_ )} files, run `make style`.' ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""") lowerCAmelCase__ = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
68
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Lazy import structure: maps each submodule to the public names it exposes.
# Backend-dependent entries are *added* below (not rebound over this dict) so
# that _LazyModule sees the full structure.
_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # PyTorch missing: simply don't register the torch models.
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # TensorFlow missing: simply don't register the TF models.
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules are only imported
    # on first attribute access (the assignment to sys.modules is what makes
    # the laziness take effect for importers of this package).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
68
1
from __future__ import annotations


class Node:
    """A node of a singly linked list."""

    def __init__(self, data=None) -> None:
        # Payload of this node; `next` is filled in by the list builder.
        self.data = data
        self.next: Node | None = None

    def __repr__(self) -> str:
        """Return the list from this node onward, e.g. '1->2->3'."""
        string_rep = []
        temp: Node | None = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list: list) -> Node:
    """Create a linked list from `elements_list` and return its head node.

    Raises:
        Exception: if `elements_list` is empty.
    """
    if not elements_list:
        raise Exception("The Elements List is empty")

    # `current` walks the list while `head` keeps the entry point.
    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node) -> None:
    """Print the data of each node in reverse order (recursive)."""
    if head_node is not None and isinstance(head_node, Node):
        # Recurse to the tail first so the deepest node prints first.
        print_reverse(head_node.next)
        print(head_node.data)


def main() -> None:
    """Run the doctests, then demo list construction and reverse printing."""
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
68
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    """Configuration for a GPT-NeoX-Japanese model.

    Stores the hyperparameters that define the model architecture. Defaults
    match the abeja/gpt-neox-japanese-2.7b checkpoint.

    Args:
        vocab_size: Size of the token vocabulary.
        hidden_size: Dimension of the encoder layers and pooler.
        num_hidden_layers: Number of transformer layers.
        num_attention_heads: Number of attention heads per layer.
        intermediate_multiple_size: Multiplier of `hidden_size` for the
            intermediate (feed-forward) dimension.
        hidden_act: Activation function of the feed-forward layers.
        rotary_pct: Fraction of hidden dimensions allocated to rotary embeddings.
        rotary_emb_base: Base for computing rotary embedding frequencies.
        max_position_embeddings: Maximum sequence length the model supports.
        initializer_range: Stddev of the truncated-normal weight initializer.
        layer_norm_eps: Epsilon used by the layer-normalization layers.
        use_cache: Whether the model returns key/value attentions for decoding.
        bos_token_id: Beginning-of-sequence token id.
        eos_token_id: End-of-sequence token id.
        attention_dropout: Dropout ratio of the attention probabilities.
        hidden_dropout: Dropout ratio of the hidden states.
        **kwargs: Forwarded to `PretrainedConfig`.
    """

    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        # The base class records bos/eos ids and any extra kwargs.
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
68
1