Dataset schema (one row per sample; each row below is printed in column order):

    code                       string   lengths 81 – 54k
    code_codestyle             int64    0 – 721
    style_context              string   lengths 91 – 41.9k
    style_context_codestyle    int64    0 – 699
    label                      int64    0 – 1
code:

import numpy


class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        # Input values provided for training the model.
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        # Layer connecting the input nodes with the first hidden set of nodes.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the prediction is to be made.
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))


def example() -> int:
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
code_codestyle: 713
style_context:

import argparse
import re
from typing import Dict

import torch
from datasets import Audio, Dataset, load_dataset, load_metric

from transformers import AutoFeatureExtractor, pipeline


def log_results(result: Dataset, args: Dict[str, str]):
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    args = parser.parse_args()

    main(args)
style_context_codestyle: 52
label: 0
code:

import argparse
import io

import requests
import torch
from omegaconf import OmegaConf

from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
    assign_to_checkpoint,
    conv_attn_to_linear,
    create_vae_diffusers_config,
    renew_vae_attention_paths,
    renew_vae_resnet_paths,
)


def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint


def vae_pt_to_vae_diffuser(
    checkpoint_path: str,
    output_path: str,
):
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output directory.")

    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
code_codestyle: 714
style_context:

import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1_333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1_320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1_344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False) -> None:
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name


def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def prepare_img() -> Image.Image:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_yolos_checkpoint(
    yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
) -> None:
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--yolos_name",
        default="yolos_s_200_pre",
        type=str,
        help=(
            "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
            " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
style_context_codestyle: 52
label: 0
code:

def solution(pence: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
code_codestyle: 715
style_context:

from typing import TYPE_CHECKING

from ..utils import _LazyModule


_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}


if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
style_context_codestyle: 52
label: 0
code:

from __future__ import annotations


def maximum_non_adjacent_sum(nums: list[int]) -> int:
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
code_codestyle: 716
style_context:

import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"


class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
style_context_codestyle: 52
label: 0
code:

from typing import List

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}


class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
code_codestyle: 717
style_context:

import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch


TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
style_context_codestyle: 52
label: 0
code:

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]

if TYPE_CHECKING:
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 718
style_context:

from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
style_context_codestyle: 52
label: 0
code:

from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")

    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
code_codestyle: 719
style_context:

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_xlm_roberta": [
        "XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaConfig",
        "XLMRobertaOnnxConfig",
    ],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm_roberta"] = [
        "XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMRobertaForCausalLM",
        "XLMRobertaForMaskedLM",
        "XLMRobertaForMultipleChoice",
        "XLMRobertaForQuestionAnswering",
        "XLMRobertaForSequenceClassification",
        "XLMRobertaForTokenClassification",
        "XLMRobertaModel",
        "XLMRobertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
        "TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMRobertaForCausalLM",
        "TFXLMRobertaForMaskedLM",
        "TFXLMRobertaForMultipleChoice",
        "TFXLMRobertaForQuestionAnswering",
        "TFXLMRobertaForSequenceClassification",
        "TFXLMRobertaForTokenClassification",
        "TFXLMRobertaModel",
        "TFXLMRobertaPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
        "FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxXLMRobertaForMaskedLM",
        "FlaxXLMRobertaForCausalLM",
        "FlaxXLMRobertaForMultipleChoice",
        "FlaxXLMRobertaForQuestionAnswering",
        "FlaxXLMRobertaForSequenceClassification",
        "FlaxXLMRobertaForTokenClassification",
        "FlaxXLMRobertaModel",
        "FlaxXLMRobertaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlm_roberta import (
        XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaConfig,
        XLMRobertaOnnxConfig,
    )

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlm_roberta import XLMRobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta import (
            XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaForCausalLM,
            XLMRobertaForMaskedLM,
            XLMRobertaForMultipleChoice,
            XLMRobertaForQuestionAnswering,
            XLMRobertaForSequenceClassification,
            XLMRobertaForTokenClassification,
            XLMRobertaModel,
            XLMRobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm_roberta import (
            TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMRobertaForCausalLM,
            TFXLMRobertaForMaskedLM,
            TFXLMRobertaForMultipleChoice,
            TFXLMRobertaForQuestionAnswering,
            TFXLMRobertaForSequenceClassification,
            TFXLMRobertaForTokenClassification,
            TFXLMRobertaModel,
            TFXLMRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xlm_roberta import (
            FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxXLMRobertaForCausalLM,
            FlaxXLMRobertaForMaskedLM,
            FlaxXLMRobertaForMultipleChoice,
            FlaxXLMRobertaForQuestionAnswering,
            FlaxXLMRobertaForSequenceClassification,
            FlaxXLMRobertaForTokenClassification,
            FlaxXLMRobertaModel,
            FlaxXLMRobertaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
style_context_codestyle: 52
label: 0
code:

from __future__ import annotations

from PIL import Image

# Define glider example
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]

# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]


def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images


if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
code_codestyle: 720
style_context:

import argparse
import ast
import logging
import os
import sys

import pandas as pd
import torch
from tqdm import tqdm

from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging


sys.path.append(os.path.join(os.getcwd()))  # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip


logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)

transformers_logging.set_verbosity_info()


def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)


def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")


def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")


def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings


def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token", "bart"],
        type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name",
        default=None,
        choices=["exact", "compressed", "legacy"],
        type=str,
        help="RAG model retriever type",
    )
    parser.add_argument(
        "--index_path",
        default=None,
        type=str,
        help="Path to the retrieval index",
    )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode",
        choices=["e2e", "retrieval"],
        default="e2e",
        type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set",
        default=None,
        type=str,
        required=True,
        help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path",
        default=None,
        type=str,
        required=True,
        help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode",
        default="qa",
        type=str,
        choices=["qa", "ans"],
        help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path",
        type=str,
        default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument(
        "--eval_batch_size",
        default=8,
        type=int,
        help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--recalculate",
        help="Recalculate predictions even if the prediction file exists",
        action="store_true",
    )
    parser.add_argument(
        "--num_beams",
        default=4,
        type=int,
        help="Number of beams to be used when generating answers",
    )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")

    parser.add_argument(
        "--print_predictions",
        action="store_true",
        help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs",
        action="store_true",
        help="If True, prints docs retried while generating.",
    )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args


def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)


if __name__ == "__main__":
    args = get_args()
    main(args)
style_context_codestyle: 52
label: 0
code:

import math
import os
from copy import deepcopy

import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer

from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed


os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"


def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
721
import inspect import unittest from transformers import ViTHybridConfig from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class __lowerCAmelCase : """simple docstring""" def __init__( self : List[Any] , _snake_case : Any , _snake_case : Optional[int]=13 , _snake_case : Optional[Any]=64 , _snake_case : List[str]=2 , _snake_case : Any=3 , _snake_case : Union[str, Any]=True , _snake_case : Dict=True , _snake_case : int=32 , _snake_case : int=5 , _snake_case : Union[str, Any]=4 , _snake_case : int=37 , _snake_case : Tuple="gelu" , _snake_case : Optional[int]=0.1 , _snake_case : Dict=0.1 , _snake_case : List[str]=10 , _snake_case : Union[str, Any]=0.02 , _snake_case : Dict=[1, 16, 4, 4] , _snake_case : Dict=None , ): """simple docstring""" A__ = parent A__ = batch_size A__ = image_size A__ = patch_size A__ = num_channels A__ = is_training A__ = use_labels A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = type_sequence_label_size A__ = initializer_range A__ = scope A__ = backbone_featmap_shape # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) # the number of patches is based on the feature map of the backbone, which by default uses an output stride # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size A__ = (self.image_size // 32) ** 2 A__ = num_patches + 1 def _a ( self : Any ): """simple docstring""" A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A__ = self.get_config() return config, pixel_values, labels def _a ( self : Tuple ): """simple docstring""" A__ = { 'global_padding': 'same', 'layer_type': 'bottleneck', 'depths': [3, 4, 9], 'out_features': ['stage1', 'stage2', 'stage3'], 'embedding_dynamic_padding': True, 'hidden_sizes': [4, 8, 16, 32], 'num_groups': 2, } return ViTHybridConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_snake_case , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=_snake_case , ) def _a ( self : int , _snake_case : Optional[int] , _snake_case : Union[str, Any] , _snake_case : Optional[int] ): """simple docstring""" A__ = ViTHybridModel(config=_snake_case ) model.to(_snake_case ) model.eval() A__ = model(_snake_case ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : List[str] , _snake_case : str , _snake_case : Union[str, Any] , _snake_case : Any ): """simple docstring""" A__ = self.type_sequence_label_size A__ = ViTHybridForImageClassification(_snake_case ) model.to(_snake_case ) model.eval() A__ = model(_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _a ( self : Dict ): """simple docstring""" A__ = self.prepare_config_and_inputs() A__ , A__ , A__ = config_and_inputs A__ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" A__ : Union[str, Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () A__ : str = ( {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification} if is_torch_available() else {} ) A__ : Union[str, Any] = False A__ : Any = False A__ : Union[str, Any] = False def _a ( self : Dict ): """simple docstring""" A__ = ViTHybridModelTester(self ) A__ = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=37 ) def _a ( self : int ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='ViT does not use inputs_embeds' ) def _a ( self : int ): """simple docstring""" pass def _a ( self : int ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(_snake_case ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_snake_case , nn.Linear ) ) def _a ( self : List[str] ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(_snake_case ) A__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ['pixel_values'] self.assertListEqual(arg_names[:1] , _snake_case ) def _a ( self : Any ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def _a ( self : str ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_snake_case ) def _a ( self : Any ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = _config_zero_init(_snake_case ) for model_class in self.all_model_classes: A__ = model_class(config=_snake_case ) # Skip the check for the backbone for name, module in model.named_modules(): if module.__class__.__name__ == "ViTHybridPatchEmbeddings": A__ = [F'''{name}.{key}''' for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @slow def _a ( self : int ): """simple docstring""" for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = ViTHybridModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) def A ( ) 
-> Union[str, Any]: A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def _a ( self : Tuple ): """simple docstring""" return ( ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _a ( self : Optional[Any] ): """simple docstring""" A__ = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( _snake_case ) A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=_snake_case , return_tensors='pt' ).to(_snake_case ) # forward pass with torch.no_grad(): A__ = model(**_snake_case ) # verify the logits A__ = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , _snake_case ) A__ = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(_snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _snake_case , atol=1E-4 ) ) @slow @require_accelerate def _a ( self : List[Any] ): """simple docstring""" A__ = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' ) A__ = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' ) A__ = prepare_img() A__ = image_processor(images=_snake_case , return_tensors='pt' ) A__ = model(**_snake_case ) A__ = outputs.logits # model predicts one of the 1000 ImageNet classes A__ = logits.argmax(-1 ).item() self.assertEqual(model.config.idalabel[predicted_class_idx] , 'tabby, tabby cat' )
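# A quick check of the sequence length the tester above derives (assumption:
# backbone output stride 32 plus one [CLS] token, as its comment states):
def vit_hybrid_seq_length(image_size, output_stride=32):
    return (image_size // output_stride) ** 2 + 1

assert vit_hybrid_seq_length(64) == 5     # 2x2 feature map + [CLS]
assert vit_hybrid_seq_length(384) == 145  # 12x12 feature map + [CLS]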
52
0
# NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401 from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401 deprecate( '''stable diffusion controlnet''', '''0.22.0''', '''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''', standard_warn=False, stacklevel=3, )
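# A self-contained sketch of the helper this shim relies on (the real
# `deprecate` in diffusers.utils is more featureful; this is an assumption
# about its core behavior, not its actual signature):
import warnings

def deprecate(name, version, message, standard_warn=True, stacklevel=2):
    prefix = f"`{name}` is deprecated and will be removed in version {version}. " if standard_warn else ""
    warnings.warn(prefix + message, FutureWarning, stacklevel=stacklevel)

deprecate("stable diffusion controlnet", "0.22.0", "Import from `diffusers` directly.", standard_warn=False)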
700
def A ( __UpperCamelCase ) -> bool: return number & 1 == 0 if __name__ == "__main__": import doctest doctest.testmod()
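# Why `number & 1 == 0` works: the least significant bit of a two's-complement
# integer is 0 exactly for even values, negatives included. A quick
# self-contained check:
for n in (-4, -3, 0, 1, 2, 7, 10**12):
    assert (n & 1 == 0) == (n % 2 == 0)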
52
0
import os import sys from contextlib import contextmanager # Windows only if os.name == "nt": import ctypes import msvcrt # noqa class __lowerCAmelCase ( ctypes.Structure ): """simple docstring""" A__ : Dict = [("size", ctypes.c_int), ("visible", ctypes.c_byte)] def A ( ) -> Dict: if os.name == "nt": A__ = CursorInfo() A__ = ctypes.windll.kernelaa.GetStdHandle(-11 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) ) A__ = False ctypes.windll.kernelaa.SetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) ) elif os.name == "posix": sys.stdout.write('\033[?25l' ) sys.stdout.flush() def A ( ) -> Optional[int]: if os.name == "nt": A__ = CursorInfo() A__ = ctypes.windll.kernelaa.GetStdHandle(-11 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) ) A__ = True ctypes.windll.kernelaa.SetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) ) elif os.name == "posix": sys.stdout.write('\033[?25h' ) sys.stdout.flush() @contextmanager def A ( ) -> int: try: hide_cursor() yield finally: show_cursor()
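# A usage sketch of the @contextmanager helper at the end of this file (defs
# here are all obfuscated to `A`; `cursor_hidden` is a hypothetical name for
# it, and an interactive terminal is assumed):
import time

with cursor_hidden():
    for i in range(3):
        print("\rworking" + "." * (i + 1), end="", flush=True)
        time.sleep(0.5)
print()  # show_cursor() has run by now, even if the body raised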
701
from typing import Dict from .base import GenericTensor, Pipeline class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def _a ( self : Any , _snake_case : str=None , _snake_case : Dict=None , _snake_case : Any=None , **_snake_case : str ): """simple docstring""" if tokenize_kwargs is None: A__ = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( 'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' ) A__ = truncation A__ = tokenize_kwargs A__ = {} if return_tensors is not None: A__ = return_tensors return preprocess_params, {}, postprocess_params def _a ( self : Any , _snake_case : Dict , **_snake_case : Optional[Any] ): """simple docstring""" A__ = self.framework A__ = self.tokenizer(_snake_case , return_tensors=_snake_case , **_snake_case ) return model_inputs def _a ( self : List[Any] , _snake_case : Dict ): """simple docstring""" A__ = self.model(**_snake_case ) return model_outputs def _a ( self : Optional[Any] , _snake_case : List[Any] , _snake_case : str=False ): """simple docstring""" if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self : Dict , *_snake_case : int , **_snake_case : List[str] ): """simple docstring""" return super().__call__(*_snake_case , **_snake_case )
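# A usage sketch for this pipeline (assumption: the standard `pipeline`
# factory with an illustrative model name; requires network access):
from transformers import pipeline

extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
features = extractor("This is a test")
# `features` is a nested list shaped [batch, tokens, hidden_size]:
print(len(features[0]), len(features[0][0]))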
52
0
import torch from transformers import AutoModel class __lowerCAmelCase ( torch.nn.Module ): def __init__( self : Optional[Any] , _snake_case : int="sayef/fsner-bert-base-uncased" ): """simple docstring""" super(_snake_case , self ).__init__() A__ = AutoModel.from_pretrained(_snake_case , return_dict=_snake_case ) A__ = torch.nn.CosineSimilarity(3 , 1E-08 ) A__ = torch.nn.Softmax(dim=1 ) def _a ( self : Any , **_snake_case : Dict ): """simple docstring""" return self.bert(**_snake_case ).last_hidden_state def _a ( self : Optional[int] , _snake_case : int ): """simple docstring""" return token_embeddings.sum(2 , keepdim=_snake_case ) def _a ( self : str , _snake_case : Tuple , _snake_case : Tuple , _snake_case : List[Any]=1 ): """simple docstring""" return self.softmax(T * self.cos(_snake_case , _snake_case ) ) def _a ( self : Any , _snake_case : int , _snake_case : Union[str, Any] ): """simple docstring""" A__ = W_supports['sizes'].tolist() A__ = W_supports['start_token_id'].item() A__ = W_supports['end_token_id'].item() del W_supports["sizes"] del W_supports["start_token_id"] del W_supports["end_token_id"] A__ = self.BERT(**_snake_case ) A__ = self.BERT(**_snake_case ) A__ = None A__ = None A__ = W_supports['input_ids'] == start_token_id A__ = W_supports['input_ids'] == end_token_id for i, size in enumerate(_snake_case ): if i == 0: A__ = 0 else: A__ = support_sizes[i - 1] A__ = S[s : s + size][start_token_masks[s : s + size]] A__ = S[s : s + size][end_token_masks[s : s + size]] A__ = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 ) A__ = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 ) if p_starts is not None: A__ = torch.vstack((p_starts, p_start) ) A__ = torch.vstack((p_ends, p_end) ) else: A__ = p_start A__ = p_end return p_starts, p_ends
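# The scoring primitive this model builds on, in isolation: a
# temperature-scaled softmax over cosine similarities (shapes simplified to
# single embeddings; an illustration, not the model's exact forward path):
import torch

def cos_softmax(q, supports, T=1.0):
    cos = torch.nn.functional.cosine_similarity(q.unsqueeze(0), supports, dim=-1)
    return torch.softmax(T * cos, dim=0)

q = torch.randn(8)        # one query embedding
s = torch.randn(5, 8)     # five support embeddings
print(cos_softmax(q, s))  # probabilities over the supports, summing to 1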
702
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__) def A ( __UpperCamelCase , __UpperCamelCase ) -> List[Any]: return (preds == labels).mean() @dataclass class __lowerCAmelCase : """simple docstring""" A__ : str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) A__ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) A__ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) A__ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) @dataclass class __lowerCAmelCase : """simple docstring""" A__ : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} ) A__ : str = field(metadata={"help": "Should contain the data files for the task."} ) A__ : int = field( default=1_28 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) A__ : bool = field( default=UpperCAmelCase_ , metadata={"help": "Overwrite the cached training and evaluation sets"} ) def A ( ) -> Any: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) A__ , A__ , A__ = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' ' --overwrite_output_dir to overcome.' 
) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , __UpperCamelCase ) # Set seed set_seed(training_args.seed ) try: A__ = processors[data_args.task_name]() A__ = processor.get_labels() A__ = len(__UpperCamelCase ) except KeyError: raise ValueError('Task not found: %s' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. A__ = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__UpperCamelCase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) A__ = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) A__ = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__UpperCamelCase , cache_dir=model_args.cache_dir , ) # Get datasets A__ = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=__UpperCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) A__ = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=__UpperCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(__UpperCamelCase ) -> Dict: A__ = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(__UpperCamelCase , p.label_ids )} # Data collator A__ = DataCollatorWithPadding(__UpperCamelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer A__ = Trainer( model=__UpperCamelCase , args=__UpperCamelCase , train_dataset=__UpperCamelCase , eval_dataset=__UpperCamelCase , compute_metrics=__UpperCamelCase , data_collator=__UpperCamelCase , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation A__ = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) A__ = trainer.evaluate() A__ = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_master(): with open(__UpperCamelCase , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): 
logger.info(' %s = %s' , __UpperCamelCase , __UpperCamelCase ) writer.write('%s = %s\n' % (key, value) ) results.update(__UpperCamelCase ) return results def A ( __UpperCamelCase ) -> List[Any]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
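# A quick check of the `simple_accuracy` helper the script registers as its
# metric (numpy arrays, as the Trainer's EvalPrediction supplies them):
import numpy as np

preds = np.array([0, 1, 2, 1])
labels = np.array([0, 1, 1, 1])
assert (preds == labels).mean() == 0.75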
52
0
import pytest import datasets # Import fixture modules as plugins SCREAMING_SNAKE_CASE__ = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec'''] def A ( __UpperCamelCase , __UpperCamelCase ) -> Optional[int]: # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit") for item in items: if any(marker in item.keywords for marker in ['integration', 'unit'] ): continue item.add_marker(pytest.mark.unit ) def A ( __UpperCamelCase ) -> str: config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' ) @pytest.fixture(autouse=__UpperCamelCase ) def A ( __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]: # test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work? A__ = tmp_path_factory.getbasetemp() / 'cache' A__ = test_hf_cache_home / 'datasets' A__ = test_hf_cache_home / 'metrics' A__ = test_hf_cache_home / 'modules' monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(__UpperCamelCase ) ) monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(__UpperCamelCase ) ) monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(__UpperCamelCase ) ) A__ = test_hf_datasets_cache / 'downloads' monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(__UpperCamelCase ) ) A__ = test_hf_datasets_cache / 'downloads' / 'extracted' monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(__UpperCamelCase ) ) @pytest.fixture(autouse=__UpperCamelCase , scope='session' ) def A ( ) -> Union[str, Any]: datasets.disable_progress_bar() @pytest.fixture(autouse=__UpperCamelCase ) def A ( __UpperCamelCase ) -> int: # don't take tests into account when counting downloads monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , __UpperCamelCase ) @pytest.fixture def A ( __UpperCamelCase ) -> Any: # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0 # To be removed once SQLAlchemy 2.0 supported monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , __UpperCamelCase )
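# What the collection hook above achieves, sketched with hypothetical tests
# (assumption: the `integration`/`unit` markers are registered in the pytest
# config, so no unknown-mark warnings fire):
import pytest

@pytest.mark.integration
def test_end_to_end():
    ...  # keeps its explicit marker

def test_helper():
    ...  # would get `pytest.mark.unit` added at collection time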
703
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available SCREAMING_SNAKE_CASE__ = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ '''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MraForMaskedLM''', '''MraForMultipleChoice''', '''MraForQuestionAnswering''', '''MraForSequenceClassification''', '''MraForTokenClassification''', '''MraLayer''', '''MraModel''', '''MraPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mra import ( MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraLayer, MraModel, MraPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
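# A minimal, simplified stand-in for the `_LazyModule` pattern this __init__
# uses (an illustration of the idea, not transformers' implementation, which
# also subclasses ModuleType):
import importlib

class LazyModule:
    def __init__(self, name, import_structure):
        self._name = name
        # Map each exported symbol to the submodule defining it.
        self._symbol_to_module = {
            sym: mod for mod, syms in import_structure.items() for sym in syms
        }

    def __getattr__(self, attr):
        # Import the submodule only when the symbol is first touched.
        module = importlib.import_module(f".{self._symbol_to_module[attr]}", self._name)
        return getattr(module, attr)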
52
0
def A ( __UpperCamelCase ) -> int: if not isinstance(__UpperCamelCase , __UpperCamelCase ): raise TypeError('only integers accepted as input' ) else: A__ = str(abs(__UpperCamelCase ) ) A__ = [list(__UpperCamelCase ) for char in range(len(__UpperCamelCase ) )] for index in range(len(__UpperCamelCase ) ): num_transpositions[index].pop(__UpperCamelCase ) return max( int(''.join(list(__UpperCamelCase ) ) ) for transposition in num_transpositions ) if __name__ == "__main__": __import__('''doctest''').testmod()
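# A hand-worked instance of the routine above for input 371: dropping one
# digit at a time yields 71, 31, and 37, so the maximum is 71.
candidates = []
for index in range(3):
    digits = list("371")
    digits.pop(index)
    candidates.append(int("".join(digits)))
assert max(candidates) == 71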
704
import webbrowser from sys import argv from urllib.parse import parse_qs, quote import requests from bsa import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: '''))) print('''Googling.....''') SCREAMING_SNAKE_CASE__ = f'https://www.google.com/search?q={query}&num=100' SCREAMING_SNAKE_CASE__ = requests.get( url, headers={'''User-Agent''': str(UserAgent().random)}, ) try: SCREAMING_SNAKE_CASE__ = ( BeautifulSoup(res.text, '''html.parser''') .find('''div''', attrs={'''class''': '''yuRUbf'''}) .find('''a''') .get('''href''') ) except AttributeError: SCREAMING_SNAKE_CASE__ = parse_qs( BeautifulSoup(res.text, '''html.parser''') .find('''div''', attrs={'''class''': '''kCrYT'''}) .find('''a''') .get('''href''') )['''url'''][0] webbrowser.open(link)
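# A small check of the `parse_qs` fallback used above: Google's redirect
# hrefs carry the real target in a `url` query parameter (the href shape is
# illustrative):
from urllib.parse import parse_qs

href = "/url?q=ignored&url=https://example.com/page&sa=U"
assert parse_qs(href)["url"][0] == "https://example.com/page"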
52
0
import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" A__ : int = (DDPMScheduler,) def _a ( self : Optional[Any] , **_snake_case : Any ): """simple docstring""" A__ = { 'num_train_timesteps': 10_00, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'variance_type': 'fixed_small', 'clip_sample': True, } config.update(**_snake_case ) return config def _a ( self : Dict ): """simple docstring""" for timesteps in [1, 5, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=_snake_case ) def _a ( self : List[Any] ): """simple docstring""" for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=_snake_case , beta_end=_snake_case ) def _a ( self : List[str] ): """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_snake_case ) def _a ( self : Any ): """simple docstring""" for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=_snake_case ) def _a ( self : Optional[Any] ): """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=_snake_case ) def _a ( self : Optional[Any] ): """simple docstring""" self.check_over_configs(thresholding=_snake_case ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=_snake_case , prediction_type=_snake_case , sample_max_value=_snake_case , ) def _a ( self : Optional[Any] ): """simple docstring""" for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=_snake_case ) def _a ( self : List[Any] ): """simple docstring""" for t in [0, 5_00, 9_99]: self.check_over_forward(time_step=_snake_case ) def _a ( self : str ): """simple docstring""" A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config() A__ = scheduler_class(**_snake_case ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_0979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.02 ) ) < 1E-5 def _a ( self : str ): """simple docstring""" A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config() A__ = scheduler_class(**_snake_case ) A__ = len(_snake_case ) A__ = self.dummy_model() A__ = self.dummy_sample_deter A__ = torch.manual_seed(0 ) for t in reversed(range(_snake_case ) ): # 1. predict noise residual A__ = model(_snake_case , _snake_case ) # 2. predict previous mean of sample x_t-1 A__ = scheduler.step(_snake_case , _snake_case , _snake_case , generator=_snake_case ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance A__ = pred_prev_sample A__ = torch.sum(torch.abs(_snake_case ) ) A__ = torch.mean(torch.abs(_snake_case ) ) assert abs(result_sum.item() - 258.9606 ) < 1E-2 assert abs(result_mean.item() - 0.3372 ) < 1E-3 def _a ( self : Dict ): """simple docstring""" A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(prediction_type='v_prediction' ) A__ = scheduler_class(**_snake_case ) A__ = len(_snake_case ) A__ = self.dummy_model() A__ = self.dummy_sample_deter A__ = torch.manual_seed(0 ) for t in reversed(range(_snake_case ) ): # 1. predict noise residual A__ = model(_snake_case , _snake_case ) # 2. 
predict previous mean of sample x_t-1 A__ = scheduler.step(_snake_case , _snake_case , _snake_case , generator=_snake_case ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance A__ = pred_prev_sample A__ = torch.sum(torch.abs(_snake_case ) ) A__ = torch.mean(torch.abs(_snake_case ) ) assert abs(result_sum.item() - 202.0296 ) < 1E-2 assert abs(result_mean.item() - 0.2631 ) < 1E-3 def _a ( self : Any ): """simple docstring""" A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config() A__ = scheduler_class(**_snake_case ) A__ = [1_00, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=_snake_case ) A__ = scheduler.timesteps for i, timestep in enumerate(_snake_case ): if i == len(_snake_case ) - 1: A__ = -1 else: A__ = timesteps[i + 1] A__ = scheduler.previous_timestep(_snake_case ) A__ = prev_t.item() self.assertEqual(_snake_case , _snake_case ) def _a ( self : str ): """simple docstring""" A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config() A__ = scheduler_class(**_snake_case ) A__ = [1_00, 87, 50, 51, 0] with self.assertRaises(_snake_case , msg='`custom_timesteps` must be in descending order.' ): scheduler.set_timesteps(timesteps=_snake_case ) def _a ( self : Optional[Any] ): """simple docstring""" A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config() A__ = scheduler_class(**_snake_case ) A__ = [1_00, 87, 50, 1, 0] A__ = len(_snake_case ) with self.assertRaises(_snake_case , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ): scheduler.set_timesteps(num_inference_steps=_snake_case , timesteps=_snake_case ) def _a ( self : Optional[Any] ): """simple docstring""" A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config() A__ = scheduler_class(**_snake_case ) A__ = [scheduler.config.num_train_timesteps] with self.assertRaises( _snake_case , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ): scheduler.set_timesteps(timesteps=_snake_case )
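# A usage sketch of the reverse-diffusion loop these tests exercise (a zero
# "noise prediction" stands in for a trained UNet; shapes are illustrative):
import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(50)
sample = torch.randn(1, 3, 8, 8)
generator = torch.manual_seed(0)
for t in scheduler.timesteps:
    noise_pred = torch.zeros_like(sample)  # stand-in for a model's output
    sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample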
705
import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" A__ : Any = IFInpaintingPipeline A__ : Dict = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"} A__ : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS A__ : Dict = PipelineTesterMixin.required_optional_params - {"latents"} def _a ( self : Any ): """simple docstring""" return self._get_dummy_components() def _a ( self : Optional[int] , _snake_case : Any , _snake_case : str=0 ): """simple docstring""" if str(_snake_case ).startswith('mps' ): A__ = torch.manual_seed(_snake_case ) else: A__ = torch.Generator(device=_snake_case ).manual_seed(_snake_case ) A__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case ) A__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case ) A__ = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def _a ( self : Dict ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def _a ( self : int ): """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def _a ( self : Optional[int] ): """simple docstring""" super().test_save_load_floataa(expected_max_diff=1E-1 ) def _a ( self : List[str] ): """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def _a ( self : Dict ): """simple docstring""" self._test_save_load_local() def _a ( self : Optional[int] ): """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
52
0
'''simple docstring''' SCREAMING_SNAKE_CASE__ = '''0.21.0''' from .accelerator import Accelerator from .big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from .data_loader import skip_first_batches from .launchers import debug_launcher, notebook_launcher from .state import PartialState from .utils import ( DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states, ) if is_rich_available(): from .utils import rich
706
import inspect import jax import jax.lax as lax import jax.numpy as jnp from ..utils import add_start_docstrings from ..utils.logging import get_logger SCREAMING_SNAKE_CASE__ = get_logger(__name__) SCREAMING_SNAKE_CASE__ = r''' Args: input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs (`Dict[str, Any]`, *optional*): Additional logits processor specific kwargs. Return: `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores. ''' class __lowerCAmelCase : """simple docstring""" @add_start_docstrings(_snake_case ) def __call__( self : Optional[int] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray ): """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class __lowerCAmelCase : """simple docstring""" @add_start_docstrings(_snake_case ) def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray ): """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" @add_start_docstrings(_snake_case ) def __call__( self : Any , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int , **_snake_case : Optional[int] ): """simple docstring""" for processor in self: A__ = inspect.signature(processor.__call__ ).parameters if len(_snake_case ) > 3: if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ): raise ValueError( F'''Make sure that all the required parameters: {list(function_args.keys() )} for ''' F'''{processor.__class__} are passed to the logits processor.''' ) A__ = processor(_snake_case , _snake_case , _snake_case , **_snake_case ) else: A__ = processor(_snake_case , _snake_case , _snake_case ) return scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Any , _snake_case : float ): """simple docstring""" if not isinstance(_snake_case , _snake_case ) or not (temperature > 0): raise ValueError(F'''`temperature` has to be a strictly positive float, but is {temperature}''' ) A__ = temperature def __call__( self : str , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ): """simple docstring""" A__ = scores / self.temperature return scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Optional[Any] , _snake_case : float , _snake_case : float = -float('Inf' ) , _snake_case : int = 1 ): """simple docstring""" if not isinstance(_snake_case , _snake_case ) or (top_p < 0 or top_p > 1.0): raise ValueError(F'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' ) if not isinstance(_snake_case , _snake_case ) or (min_tokens_to_keep < 1): raise ValueError(F'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' ) A__ = top_p A__ = filter_value A__ = min_tokens_to_keep def __call__( self : str , _snake_case : jnp.ndarray , _snake_case 
: jnp.ndarray , _snake_case : int ): """simple docstring""" A__ , A__ = lax.top_k(_snake_case , scores.shape[-1] ) A__ = jnp.full_like(_snake_case , self.filter_value ) A__ = jax.nn.softmax(_snake_case , axis=-1 ).cumsum(axis=-1 ) A__ = cumulative_probs < self.top_p # include the token that is higher than top_p as well A__ = jnp.roll(_snake_case , 1 ) score_mask |= score_mask.at[:, 0].set(_snake_case ) # min tokens to keep A__ = score_mask.at[:, : self.min_tokens_to_keep].set(_snake_case ) A__ = jnp.where(_snake_case , _snake_case , _snake_case ) A__ = jax.lax.sort_key_val(_snake_case , _snake_case )[-1] return next_scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Union[str, Any] , _snake_case : int , _snake_case : float = -float('Inf' ) , _snake_case : int = 1 ): """simple docstring""" if not isinstance(_snake_case , _snake_case ) or top_k <= 0: raise ValueError(F'''`top_k` has to be a strictly positive integer, but is {top_k}''' ) A__ = max(_snake_case , _snake_case ) A__ = filter_value def __call__( self : Optional[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ): """simple docstring""" A__ , A__ = scores.shape A__ = jnp.full(batch_size * vocab_size , self.filter_value ) A__ = min(self.top_k , scores.shape[-1] ) # Safety check A__ , A__ = lax.top_k(_snake_case , _snake_case ) A__ = jnp.broadcast_to((jnp.arange(_snake_case ) * vocab_size)[:, None] , (batch_size, topk) ).flatten() A__ = topk_scores.flatten() A__ = topk_indices.flatten() + shift A__ = next_scores_flat.at[topk_indices_flat].set(_snake_case ) A__ = next_scores_flat.reshape(_snake_case , _snake_case ) return next_scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Any , _snake_case : int ): """simple docstring""" A__ = bos_token_id def __call__( self : Optional[int] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ): """simple docstring""" A__ = jnp.full(scores.shape , -float('inf' ) ) A__ = 1 - jnp.bool_(cur_len - 1 ) A__ = jnp.where(_snake_case , new_scores.at[:, self.bos_token_id].set(0 ) , _snake_case ) return scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Any , _snake_case : int , _snake_case : int ): """simple docstring""" A__ = max_length A__ = eos_token_id def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ): """simple docstring""" A__ = jnp.full(scores.shape , -float('inf' ) ) A__ = 1 - jnp.bool_(cur_len - self.max_length + 1 ) A__ = jnp.where(_snake_case , new_scores.at[:, self.eos_token_id].set(0 ) , _snake_case ) return scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Dict , _snake_case : int , _snake_case : int ): """simple docstring""" if not isinstance(_snake_case , _snake_case ) or min_length < 0: raise ValueError(F'''`min_length` has to be a positive integer, but is {min_length}''' ) if not isinstance(_snake_case , _snake_case ) or eos_token_id < 0: raise ValueError(F'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' ) A__ = min_length A__ = eos_token_id def __call__( self : int , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ): """simple docstring""" A__ = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 ) A__ = jnp.where(_snake_case , scores.at[:, self.eos_token_id].set(-float('inf' ) ) , _snake_case ) return scores class __lowerCAmelCase ( UpperCAmelCase_ ): 
"""simple docstring""" def __init__( self : int , _snake_case : Tuple , _snake_case : Union[str, Any] ): """simple docstring""" A__ = list(_snake_case ) A__ = begin_index def __call__( self : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : str , _snake_case : int ): """simple docstring""" A__ = 1 - jnp.bool_(cur_len - self.begin_index ) A__ = jnp.where(_snake_case , scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) , _snake_case ) return scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : int , _snake_case : list ): """simple docstring""" A__ = list(_snake_case ) def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ): """simple docstring""" A__ = scores.at[..., self.suppress_tokens].set(-float('inf' ) ) return scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : List[str] , _snake_case : Optional[Any] ): """simple docstring""" A__ = dict(_snake_case ) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have a negative value. A__ = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1 for index, token in force_token_map.items(): if token is not None: A__ = force_token_array.at[index].set(_snake_case ) A__ = jnp.intaa(_snake_case ) def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ): """simple docstring""" def _force_token(_snake_case : Dict ): A__ = scores.shape[0] A__ = self.force_token_array[generation_idx] A__ = jnp.ones_like(_snake_case , dtype=scores.dtype ) * -float('inf' ) A__ = jnp.zeros((batch_size, 1) , dtype=scores.dtype ) A__ = lax.dynamic_update_slice(_snake_case , _snake_case , (0, current_token) ) return new_scores A__ = lax.cond( cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond( self.force_token_array[cur_len] >= 0 , lambda: _force_token(_snake_case ) , lambda: scores , ) , ) return scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : Dict , _snake_case : List[Any] ): """simple docstring""" A__ = generate_config.eos_token_id A__ = generate_config.no_timestamps_token_id A__ = generate_config.no_timestamps_token_id + 1 A__ = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(_snake_case , 'max_initial_timestamp_index' ): A__ = generate_config.max_initial_timestamp_index else: A__ = model_config.vocab_size if self.max_initial_timestamp_index is None: A__ = model_config.vocab_size def __call__( self : Tuple , _snake_case : List[Any] , _snake_case : Dict , _snake_case : Dict ): """simple docstring""" A__ = scores.at[:, self.no_timestamps_token_id].set(-float('inf' ) ) def handle_pairs(_snake_case : Dict , _snake_case : str ): A__ = jnp.where((cur_len - self.begin_index) >= 1 , _snake_case , _snake_case ) A__ = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , _snake_case , ) A__ = jnp.where((cur_len - self.begin_index) < 2 , _snake_case , _snake_case ) A__ = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin , _snake_case , _snake_case , ) return jnp.where( _snake_case , jnp.where( 
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('inf' ) ) , scores_k.at[: self.eos_token_id].set(-float('inf' ) ) , ) , _snake_case , ) A__ = jax.vmap(_snake_case )(_snake_case , _snake_case ) A__ = jnp.where(cur_len == self.begin_index , _snake_case , _snake_case ) A__ = jnp.where( self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , _snake_case , ) A__ = self.timestamp_begin + self.max_initial_timestamp_index A__ = jnp.where( _snake_case , scores.at[:, last_allowed + 1 :].set(-float('inf' ) ) , _snake_case , ) # if sum of probability over timestamps is above any other token, sample timestamp A__ = jax.nn.log_softmax(_snake_case , axis=-1 ) def handle_cumulative_probs(_snake_case : List[Any] , _snake_case : Union[str, Any] ): A__ = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 ) A__ = jnp.max(logprobs_k[: self.timestamp_begin] ) return jnp.where( timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('inf' ) ) , _snake_case , ) A__ = jax.vmap(_snake_case )(_snake_case , _snake_case ) return scores
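# Top-k filtering as implemented above, in isolation (batch of 1, keep the
# two best logits; everything else becomes -inf):
import jax.numpy as jnp
from jax import lax

scores = jnp.array([[1.0, 4.0, 2.0, 3.0]])
topk_scores, topk_idx = lax.top_k(scores, 2)
filtered = jnp.full_like(scores, -jnp.inf).at[0, topk_idx[0]].set(topk_scores[0])
# filtered == [[-inf, 4.0, -inf, 3.0]]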
52
0
def A ( __UpperCamelCase , __UpperCamelCase ) -> tuple[float, float]: # Check if the input is valid if not len(__UpperCamelCase ) == len(__UpperCamelCase ) == 3: raise ValueError('Please enter a valid equation.' ) if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0: raise ValueError('Both a & b of two equations can\'t be zero.' ) # Extract the coefficients A__ , A__ , A__ = equationa A__ , A__ , A__ = equationa # Calculate the determinants of the matrices A__ = aa * ba - aa * ba A__ = ca * ba - ca * ba A__ = aa * ca - aa * ca # Check if the system of linear equations has a solution (using Cramer's rule) if determinant == 0: if determinant_x == determinant_y == 0: raise ValueError('Infinite solutions. (Consistent system)' ) else: raise ValueError('No solution. (Inconsistent system)' ) else: if determinant_x == determinant_y == 0: # Trivial solution (Consistent system: x = y = 0) return (0.0, 0.0) else: A__ = determinant_x / determinant A__ = determinant_y / determinant # Non-Trivial Solution (Consistent system) return (x, y)
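# A hand-worked instance of the solver above (digit-stripped names such as
# `equationa` stand for `equation1`/`equation2`): x + 2y = 5 and 3x + 4y = 6.
a1, b1, c1 = 1, 2, 5
a2, b2, c2 = 3, 4, 6
det = a1 * b2 - a2 * b1    # -2, nonzero, so a unique solution exists
det_x = c1 * b2 - c2 * b1  # 8
det_y = a1 * c2 - a2 * c1  # -9
assert (det_x / det, det_y / det) == (-4.0, 4.5)  # x = -4, y = 4.5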
707
import argparse import struct import unittest class __lowerCAmelCase : """simple docstring""" def __init__( self : List[str] , _snake_case : bytes ): """simple docstring""" A__ = data # Initialize hash values A__ = [ 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19, ] # Initialize round constants A__ = [ 0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5, 0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3, 0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174, 0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC, 0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA, 0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7, 0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967, 0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13, 0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85, 0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3, 0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070, 0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5, 0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3, 0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208, 0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2, ] A__ = self.preprocessing(self.data ) self.final_hash() @staticmethod def _a ( _snake_case : bytes ): """simple docstring""" A__ = B'\x80' + (B'\x00' * (63 - (len(_snake_case ) + 8) % 64)) A__ = struct.pack('>Q' , (len(_snake_case ) * 8) ) return data + padding + big_endian_integer def _a ( self : Optional[int] ): """simple docstring""" A__ = [ self.preprocessed_data[x : x + 64] for x in range(0 , len(self.preprocessed_data ) , 64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers A__ = list(struct.unpack('>16L' , _snake_case ) ) # add 48 0-ed integers words += [0] * 48 A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ = self.hashes for index in range(0 , 64 ): if index > 15: # modify the zero-ed indexes at the end of the array A__ = ( self.ror(words[index - 15] , 7 ) ^ self.ror(words[index - 15] , 18 ) ^ (words[index - 15] >> 3) ) A__ = ( self.ror(words[index - 2] , 17 ) ^ self.ror(words[index - 2] , 19 ) ^ (words[index - 2] >> 10) ) A__ = ( words[index - 16] + sa + words[index - 7] + sa ) % 0x100000000 # Compression A__ = self.ror(_snake_case , 6 ) ^ self.ror(_snake_case , 11 ) ^ self.ror(_snake_case , 25 ) A__ = (e & f) ^ ((~e & 0xFFFFFFFF) & g) A__ = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0x100000000 A__ = self.ror(_snake_case , 2 ) ^ self.ror(_snake_case , 13 ) ^ self.ror(_snake_case , 22 ) A__ = (a & b) ^ (a & c) ^ (b & c) A__ = (sa + maj) % 0x100000000 A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ = ( g, f, e, ((d + tempa) % 0x100000000), c, b, a, ((tempa + tempa) % 0x100000000), ) A__ = [a, b, c, d, e, f, g, h] # Modify final values A__ = [ ((element + mutated_hash_values[index]) % 0x100000000) for index, element in enumerate(self.hashes ) ] A__ = ''.join([hex(_snake_case )[2:].zfill(8 ) for value in self.hashes] ) def _a ( self : Dict , _snake_case : int , _snake_case : int ): """simple docstring""" return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations) class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def _a ( self : str ): """simple docstring""" import hashlib A__ = bytes('Test String' , 'utf-8' ) self.assertEqual(SHAaaa(_snake_case ).hash , hashlib.shaaaa(_snake_case ).hexdigest() ) def A ( ) -> None: import doctest doctest.testmod() A__ = argparse.ArgumentParser() parser.add_argument( '-s' , '--string' , dest='input_string' , default='Hello World!! 
Welcome to Cryptography' , help='Hash the string' , ) parser.add_argument( '-f' , '--file' , dest='input_file' , help='Hash contents of a file' ) A__ = parser.parse_args() A__ = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , 'rb' ) as f: A__ = f.read() else: A__ = bytes(__UpperCamelCase , 'utf-8' ) print(SHAaaa(__UpperCamelCase ).hash ) if __name__ == "__main__": main()
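# A quick check of the preprocessing rule above: append 0x80, zero-pad, then
# the original bit length as a big-endian 64-bit integer, so the padded
# message is a multiple of 64 bytes:
import struct

data = b"abc"
padded = data + b"\x80" + b"\x00" * (63 - (len(data) + 8) % 64) + struct.pack(">Q", len(data) * 8)
assert len(padded) % 64 == 0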
52
0
import unittest from transformers import is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow if is_flax_available(): import optax from flax.training.common_utils import onehot from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration from transformers.models.ta.modeling_flax_ta import shift_tokens_right @require_torch @require_sentencepiece @require_tokenizers @require_flax class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def _a ( self : int ): """simple docstring""" A__ = FlaxMTaForConditionalGeneration.from_pretrained('google/mt5-small' ) A__ = AutoTokenizer.from_pretrained('google/mt5-small' ) A__ = tokenizer('Hello there' , return_tensors='np' ).input_ids A__ = tokenizer('Hi I am' , return_tensors='np' ).input_ids A__ = shift_tokens_right(_snake_case , model.config.pad_token_id , model.config.decoder_start_token_id ) A__ = model(_snake_case , decoder_input_ids=_snake_case ).logits A__ = optax.softmax_cross_entropy(_snake_case , onehot(_snake_case , logits.shape[-1] ) ).mean() A__ = -(labels.shape[-1] * loss.item()) A__ = -84.9127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
708
import math import random def A ( __UpperCamelCase , __UpperCamelCase = False ) -> float: if deriv: return value * (1 - value) return 1 / (1 + math.exp(-value )) # Initial Value SCREAMING_SNAKE_CASE__ = 0.02 def A ( __UpperCamelCase , __UpperCamelCase ) -> float: A__ = float(2 * (random.randint(1 , 100 )) - 1 ) for _ in range(__UpperCamelCase ): # Forward propagation A__ = sigmoid_function(INITIAL_VALUE * weight ) # How much did we miss? A__ = (expected / 100) - layer_a # Error delta A__ = layer_1_error * sigmoid_function(__UpperCamelCase , __UpperCamelCase ) # Update weight weight += INITIAL_VALUE * layer_1_delta return layer_a * 100 if __name__ == "__main__": import doctest doctest.testmod() SCREAMING_SNAKE_CASE__ = int(input('''Expected value: ''')) SCREAMING_SNAKE_CASE__ = int(input('''Number of propagations: ''')) print(forward_propagation(expected, number_propagations))
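# One hand-worked update step of the loop above (weight and target values are
# illustrative; INITIAL_VALUE is the 0.02 constant):
import math

weight, expected = 0.5, 80
layer = 1 / (1 + math.exp(-0.02 * weight))  # forward pass, ~0.5025
error = expected / 100 - layer              # ~0.2975
delta = error * layer * (1 - layer)         # value * (1 - value) derivative form
weight += 0.02 * delta                      # ~0.5015 after the update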
52
0
from collections.abc import Callable from math import pi, sqrt from random import uniform from statistics import mean def A ( __UpperCamelCase ) -> Optional[int]: # A local function to see if a dot lands in the circle. def is_in_circle(__UpperCamelCase , __UpperCamelCase ) -> bool: A__ = sqrt((x**2) + (y**2) ) # Our circle has a radius of 1, so a distance # greater than 1 would land outside the circle. return distance_from_centre <= 1 # The proportion of guesses that landed in the circle A__ = mean( int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) ) for _ in range(__UpperCamelCase ) ) # The ratio of the area for circle to square is pi/4. A__ = proportion * 4 print(f'''The estimated value of pi is {pi_estimate}''' ) print(f'''The numpy value of pi is {pi}''' ) print(f'''The total error is {abs(pi - pi_estimate )}''' ) def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 0.0 , __UpperCamelCase = 1.0 , ) -> float: return mean( function_to_integrate(uniform(__UpperCamelCase , __UpperCamelCase ) ) for _ in range(__UpperCamelCase ) ) * (max_value - min_value) def A ( __UpperCamelCase , __UpperCamelCase = 0.0 , __UpperCamelCase = 1.0 ) -> None: def identity_function(__UpperCamelCase ) -> float: return x A__ = area_under_curve_estimator( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) A__ = (max_value * max_value - min_value * min_value) / 2 print('******************' ) print(f'''Estimating area under y=x where x varies from {min_value} to {max_value}''' ) print(f'''Estimated value is {estimated_value}''' ) print(f'''Expected value is {expected_value}''' ) print(f'''Total error is {abs(estimated_value - expected_value )}''' ) print('******************' ) def A ( __UpperCamelCase ) -> None: def function_to_integrate(__UpperCamelCase ) -> float: return sqrt(4.0 - x * x ) A__ = area_under_curve_estimator( __UpperCamelCase , __UpperCamelCase , 0.0 , 2.0 ) print('******************' ) print('Estimating pi using area_under_curve_estimator' ) print(f'''Estimated value is {estimated_value}''' ) print(f'''Expected value is {pi}''' ) print(f'''Total error is {abs(estimated_value - pi )}''' ) print('******************' ) if __name__ == "__main__": import doctest doctest.testmod()
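# A quick self-contained run of the same hit-or-miss estimator (10_000
# samples keeps it fast; x**2 + y**2 <= 1 is equivalent to the sqrt test, and
# roughly two digits of pi should come out right):
from random import uniform

n = 10_000
hits = sum((uniform(-1, 1) ** 2 + uniform(-1, 1) ** 2) <= 1 for _ in range(n))
print(4 * hits / n)  # ~3.14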
709
import unittest from transformers import is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow if is_flax_available(): import optax from flax.training.common_utils import onehot from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration from transformers.models.ta.modeling_flax_ta import shift_tokens_right @require_torch @require_sentencepiece @require_tokenizers @require_flax class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def _a ( self : int ): """simple docstring""" A__ = FlaxMTaForConditionalGeneration.from_pretrained('google/mt5-small' ) A__ = AutoTokenizer.from_pretrained('google/mt5-small' ) A__ = tokenizer('Hello there' , return_tensors='np' ).input_ids A__ = tokenizer('Hi I am' , return_tensors='np' ).input_ids A__ = shift_tokens_right(_snake_case , model.config.pad_token_id , model.config.decoder_start_token_id ) A__ = model(_snake_case , decoder_input_ids=_snake_case ).logits A__ = optax.softmax_cross_entropy(_snake_case , onehot(_snake_case , logits.shape[-1] ) ).mean() A__ = -(labels.shape[-1] * loss.item()) A__ = -84.9127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
52
0
import logging from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import arg_to_scheduler from transformers import TrainingArguments SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__) @dataclass class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" A__ : Optional[float] = field( default=0.0 , metadata={"help": "The label smoothing epsilon to apply (if not zero)."} ) A__ : bool = field(default=UpperCAmelCase_ , metadata={"help": "Whether to SortishSamler or not."} ) A__ : bool = field( default=UpperCAmelCase_ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} ) A__ : bool = field(default=UpperCAmelCase_ , metadata={"help": "whether to use adafactor"} ) A__ : Optional[float] = field( default=UpperCAmelCase_ , metadata={"help": "Encoder layer dropout probability. Goes into model.config."} ) A__ : Optional[float] = field( default=UpperCAmelCase_ , metadata={"help": "Decoder layer dropout probability. Goes into model.config."} ) A__ : Optional[float] = field(default=UpperCAmelCase_ , metadata={"help": "Dropout probability. Goes into model.config."} ) A__ : Optional[float] = field( default=UpperCAmelCase_ , metadata={"help": "Attention dropout probability. Goes into model.config."} ) A__ : Optional[str] = field( default="linear" , metadata={"help": f"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"""} , )
710
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''', '''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''', } class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" A__ : List[str] = "roberta" def __init__( self : List[str] , _snake_case : Union[str, Any]=5_02_65 , _snake_case : List[Any]=7_68 , _snake_case : List[str]=12 , _snake_case : List[str]=12 , _snake_case : Any=30_72 , _snake_case : Union[str, Any]="gelu" , _snake_case : int=0.1 , _snake_case : Union[str, Any]=0.1 , _snake_case : Tuple=5_12 , _snake_case : Union[str, Any]=2 , _snake_case : Any=0.02 , _snake_case : Any=1E-12 , _snake_case : List[Any]=1 , _snake_case : int=0 , _snake_case : Any=2 , _snake_case : Optional[Any]="absolute" , _snake_case : int=True , _snake_case : Any=None , **_snake_case : Any , ): """simple docstring""" super().__init__(pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case ) A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = hidden_act A__ = intermediate_size A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_vocab_size A__ = initializer_range A__ = layer_norm_eps A__ = position_embedding_type A__ = use_cache A__ = classifier_dropout class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" @property def _a ( self : Dict ): """simple docstring""" if self.task == "multiple-choice": A__ = {0: 'batch', 1: 'choice', 2: 'sequence'} else: A__ = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
52
0
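# A minimal sketch of the dataclass-with-metadata pattern the training
# arguments above build on; `SketchArguments` is a hypothetical example class.
from dataclasses import dataclass, field
from typing import Optional

@dataclass
class SketchArguments:
    label_smoothing: float = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    dropout: Optional[float] = field(
        default=None, metadata={"help": "Dropout probability. Goes into model.config."}
    )

args = SketchArguments(dropout=0.1)
assert args.label_smoothing == 0.0 and args.dropout == 0.1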
from typing import List from .keymap import KEYMAP, get_character def A ( __UpperCamelCase ) -> Any: def decorator(__UpperCamelCase ): A__ = getattr(__UpperCamelCase , 'handle_key' , [] ) handle += [key] setattr(__UpperCamelCase , 'handle_key' , __UpperCamelCase ) return func return decorator def A ( *__UpperCamelCase ) -> str: def decorator(__UpperCamelCase ): A__ = getattr(__UpperCamelCase , 'handle_key' , [] ) handle += keys setattr(__UpperCamelCase , 'handle_key' , __UpperCamelCase ) return func return decorator class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __new__( cls : Dict , _snake_case : int , _snake_case : Any , _snake_case : List[str] ): """simple docstring""" A__ = super().__new__(cls , _snake_case , _snake_case , _snake_case ) if not hasattr(_snake_case , 'key_handler' ): setattr(_snake_case , 'key_handler' , {} ) setattr(_snake_case , 'handle_input' , KeyHandler.handle_input ) for value in attrs.values(): A__ = getattr(_snake_case , 'handle_key' , [] ) for key in handled_keys: A__ = value return new_cls @staticmethod def _a ( cls : Optional[Any] ): """simple docstring""" A__ = get_character() if char != KEYMAP["undefined"]: A__ = ord(_snake_case ) A__ = cls.key_handler.get(_snake_case ) if handler: A__ = char return handler(cls ) else: return None def A ( cls ) -> Any: return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
711
import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" A__ : int = LongformerTokenizer A__ : Optional[int] = True A__ : Any = LongformerTokenizerFast A__ : Dict = True def _a ( self : int ): """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A__ = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] A__ = dict(zip(_snake_case , range(len(_snake_case ) ) ) ) A__ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] A__ = {'unk_token': '<unk>'} A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(_snake_case ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(_snake_case ) ) def _a ( self : int , **_snake_case : Union[str, Any] ): """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **_snake_case ) def _a ( self : Optional[int] , **_snake_case : List[Any] ): """simple docstring""" kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_snake_case ) def _a ( self : Any , _snake_case : Optional[Any] ): """simple docstring""" A__ = 'lower newer' A__ = 'lower newer' return input_text, output_text def _a ( self : Any ): """simple docstring""" A__ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) A__ = 'lower newer' A__ = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er'] A__ = tokenizer.tokenize(_snake_case ) # , add_prefix_space=True) self.assertListEqual(_snake_case , _snake_case ) A__ = tokens + [tokenizer.unk_token] A__ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , _snake_case ) def _a ( self : List[str] ): """simple docstring""" A__ = self.get_tokenizer() self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=_snake_case ) , [0, 3_14_14, 2_32, 3_28, 2] ) self.assertListEqual( tokenizer.encode('Hello world! 
cécé herlolip 418' , add_special_tokens=_snake_case ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , ) @slow def _a ( self : List[Any] ): """simple docstring""" A__ = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' ) A__ = tokenizer.encode('sequence builders' , add_special_tokens=_snake_case ) A__ = tokenizer.encode('multi-sequence build' , add_special_tokens=_snake_case ) A__ = tokenizer.encode( 'sequence builders' , add_special_tokens=_snake_case , add_prefix_space=_snake_case ) A__ = tokenizer.encode( 'sequence builders' , 'multi-sequence build' , add_special_tokens=_snake_case , add_prefix_space=_snake_case ) A__ = tokenizer.build_inputs_with_special_tokens(_snake_case ) A__ = tokenizer.build_inputs_with_special_tokens(_snake_case , _snake_case ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def _a ( self : List[str] ): """simple docstring""" A__ = self.get_tokenizer() A__ = 'Encode this sequence.' A__ = tokenizer.byte_encoder[' '.encode('utf-8' )[0]] # Testing encoder arguments A__ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case , add_prefix_space=_snake_case ) A__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(_snake_case , _snake_case ) A__ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case , add_prefix_space=_snake_case ) A__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(_snake_case , _snake_case ) tokenizer.add_special_tokens({'bos_token': '<s>'} ) A__ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case ) A__ = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(_snake_case , _snake_case ) # Testing spaces after special tokens A__ = '<mask>' tokenizer.add_special_tokens( {'mask_token': AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case )} ) # mask token has a left space A__ = tokenizer.convert_tokens_to_ids(_snake_case ) A__ = 'Encode <mask> sequence' A__ = 'Encode <mask>sequence' A__ = tokenizer.encode(_snake_case ) A__ = encoded.index(_snake_case ) A__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(_snake_case , _snake_case ) A__ = tokenizer.encode(_snake_case ) A__ = encoded.index(_snake_case ) A__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(_snake_case , _snake_case ) def _a ( self : Dict ): """simple docstring""" pass def _a ( self : Union[str, Any] ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): A__ = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case ) A__ = self.tokenizer_class.from_pretrained(_snake_case , **_snake_case ) A__ = 'A, <mask> AllenNLP sentence.' 
A__ = tokenizer_r.encode_plus(_snake_case , add_special_tokens=_snake_case , return_token_type_ids=_snake_case ) A__ = tokenizer_p.encode_plus(_snake_case , add_special_tokens=_snake_case , return_token_type_ids=_snake_case ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , ) A__ = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] ) A__ = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] ) # Rust correctly handles the space before the mask while Python doesn't self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( _snake_case , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) self.assertSequenceEqual( _snake_case , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) def _a ( self : List[Any] ): """simple docstring""" for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): A__ = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) A__ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['add_prefix_space'] , _snake_case ) self.assertEqual(post_processor_state['add_prefix_space'] , _snake_case ) self.assertEqual(post_processor_state['trim_offsets'] , _snake_case ) def _a ( self : Optional[Any] ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): A__ = 'hello' # `hello` is a token in the vocabulary of `pretrained_name` A__ = F'''{text_of_1_token} {text_of_1_token}''' A__ = self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_snake_case ) + 1, len(_snake_case ) + 1 + len(_snake_case )) , ) A__ = self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_snake_case ) + 1, len(_snake_case ) + 1 + len(_snake_case )) , ) A__ = self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_snake_case ), len(_snake_case ) + 1 + len(_snake_case )) , ) A__ = 
self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_snake_case ), len(_snake_case ) + 1 + len(_snake_case )) , ) A__ = F''' {text}''' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) A__ = self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_snake_case ) + 1, 1 + len(_snake_case ) + 1 + len(_snake_case )) , ) A__ = self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_snake_case ), 1 + len(_snake_case ) + 1 + len(_snake_case )) , ) A__ = self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_snake_case ), 1 + len(_snake_case ) + 1 + len(_snake_case )) , )
52
0
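# A sketch of the key-handler decorator pattern above: rather than wrapping the
# function, the decorator records the handled key on a function attribute.
def on_key(key):
    def decorator(func):
        handled = getattr(func, "handle_key", [])
        handled.append(key)
        setattr(func, "handle_key", handled)
        return func
    return decorator

@on_key("q")
@on_key("x")
def quit_handler():
    return "quit"

assert sorted(quit_handler.handle_key) == ["q", "x"]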
'''simple docstring''' from __future__ import annotations def A ( __UpperCamelCase ) -> bool: A__ = str(__UpperCamelCase ) return n == n[::-1] def A ( __UpperCamelCase = 1_000_000 ) -> Dict: A__ = 0 for i in range(1 , __UpperCamelCase ): if is_palindrome(__UpperCamelCase ) and is_palindrome(bin(__UpperCamelCase ).split('b' )[1] ): total += i return total if __name__ == "__main__": print(solution(int(str(input().strip()))))
712
import pytest import datasets # Import fixture modules as plugins SCREAMING_SNAKE_CASE__ = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec'''] def A ( __UpperCamelCase , __UpperCamelCase ) -> Optional[int]: # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit") for item in items: if any(marker in item.keywords for marker in ['integration', 'unit'] ): continue item.add_marker(pytest.mark.unit ) def A ( __UpperCamelCase ) -> str: config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' ) @pytest.fixture(autouse=__UpperCamelCase ) def A ( __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]: # test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work? A__ = tmp_path_factory.getbasetemp() / 'cache' A__ = test_hf_cache_home / 'datasets' A__ = test_hf_cache_home / 'metrics' A__ = test_hf_cache_home / 'modules' monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(__UpperCamelCase ) ) monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(__UpperCamelCase ) ) monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(__UpperCamelCase ) ) A__ = test_hf_datasets_cache / 'downloads' monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(__UpperCamelCase ) ) A__ = test_hf_datasets_cache / 'downloads' / 'extracted' monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(__UpperCamelCase ) ) @pytest.fixture(autouse=__UpperCamelCase , scope='session' ) def A ( ) -> Union[str, Any]: datasets.disable_progress_bar() @pytest.fixture(autouse=__UpperCamelCase ) def A ( __UpperCamelCase ) -> int: # don't take tests into account when counting downloads monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , __UpperCamelCase ) @pytest.fixture def A ( __UpperCamelCase ) -> Any: # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0 # To be removed once SQLAlchemy 2.0 supported monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , __UpperCamelCase )
52
0
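# A compact restatement of the double-base palindrome test from the solution above.
def is_palindrome(s: str) -> bool:
    return s == s[::-1]

def is_double_base_palindrome(n: int) -> bool:
    # bin(585) == '0b1001001001'; drop the '0b' prefix before comparing.
    return is_palindrome(str(n)) and is_palindrome(bin(n)[2:])

assert is_double_base_palindrome(585)
assert not is_double_base_palindrome(10)  # neither '10' nor '1010' reads the same reversed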
import numpy as np import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" A__ : List[Any] = CLIPConfig A__ : Dict = ["CLIPEncoderLayer"] def __init__( self : List[str] , _snake_case : CLIPConfig ): """simple docstring""" super().__init__(_snake_case ) A__ = CLIPVisionModelWithProjection(config.vision_config ) A__ = nn.Linear(config.vision_config.projection_dim , 1 ) A__ = nn.Linear(config.vision_config.projection_dim , 1 ) @torch.no_grad() def _a ( self : Dict , _snake_case : str , _snake_case : Union[str, Any] , _snake_case : Optional[Any]=0.5 , _snake_case : List[str]=0.5 ): """simple docstring""" A__ = self.vision_model(_snake_case )[0] A__ = self.p_head(_snake_case ) A__ = nsfw_detected.flatten() A__ = nsfw_detected > p_threshold A__ = nsfw_detected.tolist() if any(_snake_case ): logger.warning( 'Potential NSFW content was detected in one or more images. A black image will be returned instead.' ' Try again with a different prompt and/or seed.' ) for idx, nsfw_detected_ in enumerate(_snake_case ): if nsfw_detected_: A__ = np.zeros(images[idx].shape ) A__ = self.w_head(_snake_case ) A__ = watermark_detected.flatten() A__ = watermark_detected > w_threshold A__ = watermark_detected.tolist() if any(_snake_case ): logger.warning( 'Potential watermarked content was detected in one or more images. A black image will be returned instead.' ' Try again with a different prompt and/or seed.' ) for idx, watermark_detected_ in enumerate(_snake_case ): if watermark_detected_: A__ = np.zeros(images[idx].shape ) return images, nsfw_detected, watermark_detected
713
import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def A ( __UpperCamelCase , __UpperCamelCase ) -> Tuple: A__ = args.log_outputs A__ = '_'.join(args.dataset.split('/' ) + [args.config, args.split] ) # load metric A__ = load_metric('wer' ) A__ = load_metric('cer' ) # compute metrics A__ = wer.compute(references=result['target'] , predictions=result['prediction'] ) A__ = cer.compute(references=result['target'] , predictions=result['prediction'] ) # print & log results A__ = f'''WER: {wer_result}\nCER: {cer_result}''' print(__UpperCamelCase ) with open(f'''{dataset_id}_eval_results.txt''' , 'w' ) as f: f.write(__UpperCamelCase ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: A__ = f'''log_{dataset_id}_predictions.txt''' A__ = f'''log_{dataset_id}_targets.txt''' with open(__UpperCamelCase , 'w' ) as p, open(__UpperCamelCase , 'w' ) as t: # mapping function to write output def write_to_file(__UpperCamelCase , __UpperCamelCase ): p.write(f'''{i}''' + '\n' ) p.write(batch['prediction'] + '\n' ) t.write(f'''{i}''' + '\n' ) t.write(batch['target'] + '\n' ) result.map(__UpperCamelCase , with_indices=__UpperCamelCase ) def A ( __UpperCamelCase ) -> str: A__ = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training A__ = re.sub(__UpperCamelCase , '' , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! A__ = ['\n\n', '\n', ' ', ' '] for t in token_sequences_to_ignore: A__ = ' '.join(text.split(__UpperCamelCase ) ) return text def A ( __UpperCamelCase ) -> Union[str, Any]: # load dataset A__ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=__UpperCamelCase ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor A__ = AutoFeatureExtractor.from_pretrained(args.model_id ) A__ = feature_extractor.sampling_rate # resample audio A__ = dataset.cast_column('audio' , Audio(sampling_rate=__UpperCamelCase ) ) # load eval pipeline if args.device is None: A__ = 0 if torch.cuda.is_available() else -1 A__ = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(__UpperCamelCase ): A__ = asr( batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) A__ = prediction['text'] A__ = normalize_text(batch['sentence'] ) return batch # run inference on all examples A__ = dataset.map(__UpperCamelCase , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(__UpperCamelCase , __UpperCamelCase ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() parser.add_argument( '''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers''' ) parser.add_argument( '''--dataset''', type=str, required=True, help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''', ) parser.add_argument( '''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice''' ) parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. 
*E.g.* `\'test\'`''') parser.add_argument( '''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.''' ) parser.add_argument( '''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.''' ) parser.add_argument( '''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.''' ) parser.add_argument( '''--device''', type=int, default=None, help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''', ) SCREAMING_SNAKE_CASE__ = parser.parse_args() main(args)
52
0
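# A sketch of the target-text normalization in the evaluation script above:
# remove ignored punctuation, lower-case, then collapse whitespace runs.
# CHARS_TO_IGNORE is abbreviated here; the script ignores a longer character set.
import re

CHARS_TO_IGNORE = r'[,?.!\-;:"%]'

def normalize_text(text: str) -> str:
    text = re.sub(CHARS_TO_IGNORE, "", text.lower())
    return " ".join(text.split())

assert normalize_text("Hello,  WORLD!") == "hello world"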
from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def A ( __UpperCamelCase ) -> bool: A__ = int(number**0.5 ) return number == sq * sq def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> tuple[int, int]: A__ = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den A__ = x_den * y_den * z_den A__ = gcd(__UpperCamelCase , __UpperCamelCase ) top //= hcf bottom //= hcf return top, bottom def A ( __UpperCamelCase = 35 ) -> int: A__ = set() A__ = 42 A__ = Fraction(0 ) A__ = 42 for x_num in range(1 , order + 1 ): for x_den in range(x_num + 1 , order + 1 ): for y_num in range(1 , order + 1 ): for y_den in range(y_num + 1 , order + 1 ): # n=1 A__ = x_num * y_den + x_den * y_num A__ = x_den * y_den A__ = gcd(__UpperCamelCase , __UpperCamelCase ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: A__ = add_three( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) unique_s.add(__UpperCamelCase ) # n=2 A__ = ( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) A__ = x_den * x_den * y_den * y_den if is_sq(__UpperCamelCase ) and is_sq(__UpperCamelCase ): A__ = int(sqrt(__UpperCamelCase ) ) A__ = int(sqrt(__UpperCamelCase ) ) A__ = gcd(__UpperCamelCase , __UpperCamelCase ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: A__ = add_three( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) unique_s.add(__UpperCamelCase ) # n=-1 A__ = x_num * y_num A__ = x_den * y_num + x_num * y_den A__ = gcd(__UpperCamelCase , __UpperCamelCase ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: A__ = add_three( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) unique_s.add(__UpperCamelCase ) # n=-2 A__ = x_num * x_num * y_num * y_num A__ = ( x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den ) if is_sq(__UpperCamelCase ) and is_sq(__UpperCamelCase ): A__ = int(sqrt(__UpperCamelCase ) ) A__ = int(sqrt(__UpperCamelCase ) ) A__ = gcd(__UpperCamelCase , __UpperCamelCase ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: A__ = add_three( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) unique_s.add(__UpperCamelCase ) for num, den in unique_s: total += Fraction(__UpperCamelCase , __UpperCamelCase ) return total.denominator + total.numerator if __name__ == "__main__": print(f'{solution() = }')
714
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) def A ( __UpperCamelCase ) -> YolosConfig: A__ = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: A__ = 192 A__ = 768 A__ = 12 A__ = 3 A__ = [800, 1_333] A__ = False elif yolos_name == "yolos_s_dWr": A__ = 330 A__ = 14 A__ = 6 A__ = 1_320 elif "yolos_s" in yolos_name: A__ = 384 A__ = 1_536 A__ = 12 A__ = 6 elif "yolos_b" in yolos_name: A__ = [800, 1_344] A__ = 91 A__ = 'huggingface/label-files' A__ = 'coco-detection-id2label.json' A__ = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='dataset' ) , 'r' ) ) A__ = {int(__UpperCamelCase ): v for k, v in idalabel.items()} A__ = idalabel A__ = {v: k for k, v in idalabel.items()} return config def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False ) -> str: for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) A__ = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) A__ = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict A__ = in_proj_weight[: config.hidden_size, :] A__ = in_proj_bias[: config.hidden_size] A__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] A__ = in_proj_weight[-config.hidden_size :, :] A__ = in_proj_bias[-config.hidden_size :] def A ( __UpperCamelCase ) -> str: if "backbone" in name: A__ = name.replace('backbone' , 'vit' ) if "cls_token" in name: A__ = name.replace('cls_token' , 'embeddings.cls_token' ) if "det_token" in name: A__ = name.replace('det_token' , 'embeddings.detection_tokens' ) if "mid_pos_embed" in name: A__ = name.replace('mid_pos_embed' , 'encoder.mid_position_embeddings' ) if "pos_embed" in name: A__ = name.replace('pos_embed' , 'embeddings.position_embeddings' ) if "patch_embed.proj" in name: A__ = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "blocks" in name: A__ = name.replace('blocks' , 'encoder.layer' ) if "attn.proj" in name: A__ = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: A__ = name.replace('attn' , 'attention.self' ) if "norm1" in name: A__ = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: A__ = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: A__ = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: A__ = name.replace('mlp.fc2' , 'output.dense' ) if "class_embed" in name: A__ = name.replace('class_embed' , 'class_labels_classifier' ) if "bbox_embed" in name: A__ = name.replace('bbox_embed' , 'bbox_predictor' ) if "vit.norm" in name: A__ = name.replace('vit.norm' , 'vit.layernorm' ) return name def A ( __UpperCamelCase , __UpperCamelCase ) -> dict: for key in orig_state_dict.copy().keys(): A__ = orig_state_dict.pop(__UpperCamelCase ) if "qkv" in key: A__ = key.split('.' 
) A__ = int(key_split[2] ) A__ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: A__ = val[:dim, :] A__ = val[ dim : dim * 2, : ] A__ = val[-dim:, :] else: A__ = val[:dim] A__ = val[dim : dim * 2] A__ = val[-dim:] else: A__ = val return orig_state_dict def A ( ) -> torch.Tensor: A__ = 'http://images.cocodataset.org/val2017/000000039769.jpg' A__ = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw ) return im @torch.no_grad() def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False ) -> List[str]: A__ = get_yolos_config(__UpperCamelCase ) # load original state_dict A__ = torch.load(__UpperCamelCase , map_location='cpu' )['model'] # load 🤗 model A__ = YolosForObjectDetection(__UpperCamelCase ) model.eval() A__ = convert_state_dict(__UpperCamelCase , __UpperCamelCase ) model.load_state_dict(__UpperCamelCase ) # Check outputs on an image, prepared by YolosImageProcessor A__ = 800 if yolos_name != 'yolos_ti' else 512 A__ = YolosImageProcessor(format='coco_detection' , size=__UpperCamelCase ) A__ = image_processor(images=prepare_img() , return_tensors='pt' ) A__ = model(**__UpperCamelCase ) A__ , A__ = outputs.logits, outputs.pred_boxes A__ , A__ = None, None if yolos_name == "yolos_ti": A__ = torch.tensor( [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] ) A__ = torch.tensor( [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] ) elif yolos_name == "yolos_s_200_pre": A__ = torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] ) A__ = torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] ) elif yolos_name == "yolos_s_300_pre": A__ = torch.tensor( [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] ) A__ = torch.tensor( [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] ) elif yolos_name == "yolos_s_dWr": A__ = torch.tensor( [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] ) A__ = torch.tensor( [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] ) elif yolos_name == "yolos_base": A__ = torch.tensor( [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] ) A__ = torch.tensor( [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] ) else: raise ValueError(f'''Unknown yolos_name: {yolos_name}''' ) assert torch.allclose(logits[0, :3, :3] , __UpperCamelCase , atol=1E-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , __UpperCamelCase , atol=1E-4 ) Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase ) print(f'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__UpperCamelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__UpperCamelCase ) if push_to_hub: A__ = { 'yolos_ti': 'yolos-tiny', 'yolos_s_200_pre': 'yolos-small', 'yolos_s_300_pre': 'yolos-small-300', 'yolos_s_dWr': 'yolos-small-dwr', 'yolos_base': 'yolos-base', } print('Pushing to the hub...' 
) A__ = model_mapping[yolos_name] image_processor.push_to_hub(__UpperCamelCase , organization='hustvl' ) model.push_to_hub(__UpperCamelCase , organization='hustvl' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--yolos_name''', default='''yolos_s_200_pre''', type=str, help=( '''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\',''' ''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.''' ), ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) SCREAMING_SNAKE_CASE__ = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
52
0
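# A sketch of the gcd reduction step the Project Euler solution above repeats
# for every candidate fraction.
from math import gcd

def reduce_fraction(num: int, den: int) -> tuple[int, int]:
    common = gcd(num, den)
    return num // common, den // common

assert reduce_fraction(6, 8) == (3, 4)
assert reduce_fraction(35, 7) == (5, 1)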
import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def A ( __UpperCamelCase , __UpperCamelCase ) -> np.array: A__ = f'''{sampling_rate}''' A__ = '1' A__ = 'f32le' A__ = [ 'ffmpeg', '-i', 'pipe:0', '-ac', ac, '-ar', ar, '-f', format_for_conversion, '-hide_banner', '-loglevel', 'quiet', 'pipe:1', ] try: with subprocess.Popen(__UpperCamelCase , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process: A__ = ffmpeg_process.communicate(__UpperCamelCase ) except FileNotFoundError as error: raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error A__ = output_stream[0] A__ = np.frombuffer(__UpperCamelCase , np.floataa ) if audio.shape[0] == 0: raise ValueError('Malformed soundfile' ) return audio def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = "f32le" , ) -> Tuple: A__ = f'''{sampling_rate}''' A__ = '1' if format_for_conversion == "s16le": A__ = 2 elif format_for_conversion == "f32le": A__ = 4 else: raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' ) A__ = platform.system() if system == "Linux": A__ = 'alsa' A__ = 'default' elif system == "Darwin": A__ = 'avfoundation' A__ = ':0' elif system == "Windows": A__ = 'dshow' A__ = 'default' A__ = [ 'ffmpeg', '-f', format_, '-i', input_, '-ac', ac, '-ar', ar, '-f', format_for_conversion, '-fflags', 'nobuffer', '-hide_banner', '-loglevel', 'quiet', 'pipe:1', ] A__ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample A__ = _ffmpeg_stream(__UpperCamelCase , __UpperCamelCase ) for item in iterator: yield item def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = "f32le" , ) -> List[str]: if stream_chunk_s is not None: A__ = stream_chunk_s else: A__ = chunk_length_s A__ = ffmpeg_microphone(__UpperCamelCase , __UpperCamelCase , format_for_conversion=__UpperCamelCase ) if format_for_conversion == "s16le": A__ = np.intaa A__ = 2 elif format_for_conversion == "f32le": A__ = np.floataa A__ = 4 else: raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' ) if stride_length_s is None: A__ = chunk_length_s / 6 A__ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(__UpperCamelCase , (int, float) ): A__ = [stride_length_s, stride_length_s] A__ = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample A__ = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample A__ = datetime.datetime.now() A__ = datetime.timedelta(seconds=__UpperCamelCase ) for item in chunk_bytes_iter(__UpperCamelCase , __UpperCamelCase , stride=(stride_left, stride_right) , stream=__UpperCamelCase ): # Put everything back in numpy scale A__ = np.frombuffer(item['raw'] , dtype=__UpperCamelCase ) A__ = ( item['stride'][0] // size_of_sample, item['stride'][1] // size_of_sample, ) A__ = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! 
SKIP continue yield item def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False ) -> Dict: A__ = b'' A__ , A__ = stride if stride_left + stride_right >= chunk_len: raise ValueError( f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' ) A__ = 0 for raw in iterator: acc += raw if stream and len(__UpperCamelCase ) < chunk_len: A__ = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(__UpperCamelCase ) >= chunk_len: # We are flushing the accumulator A__ = (_stride_left, stride_right) A__ = {'raw': acc[:chunk_len], 'stride': stride} if stream: A__ = False yield item A__ = stride_left A__ = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(__UpperCamelCase ) > stride_left: A__ = {'raw': acc, 'stride': (_stride_left, 0)} if stream: A__ = False yield item def A ( __UpperCamelCase , __UpperCamelCase ) -> Optional[int]: A__ = 2**24 # 16Mo try: with subprocess.Popen(__UpperCamelCase , stdout=subprocess.PIPE , bufsize=__UpperCamelCase ) as ffmpeg_process: while True: A__ = ffmpeg_process.stdout.read(__UpperCamelCase ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
715
from typing import TYPE_CHECKING from ..utils import _LazyModule SCREAMING_SNAKE_CASE__ = { '''config''': [ '''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''', '''OnnxConfig''', '''OnnxConfigWithPast''', '''OnnxSeq2SeqConfigWithPast''', '''PatchingSpec''', ], '''convert''': ['''export''', '''validate_model_outputs'''], '''features''': ['''FeaturesManager'''], '''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''], } if TYPE_CHECKING: from .config import ( EXTERNAL_DATA_FORMAT_SIZE_LIMIT, OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast, PatchingSpec, ) from .convert import export, validate_model_outputs from .features import FeaturesManager from .utils import ParameterFormat, compute_serialized_parameters_size else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
52
0
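# A sketch of the overlapping-chunk idea behind `chunk_bytes_iter` above:
# consecutive chunks share `stride` bytes so chunk boundaries keep context.
def chunk_with_stride(data: bytes, chunk_len: int, stride: int):
    start = 0
    while start + chunk_len <= len(data):
        yield data[start : start + chunk_len]
        start += chunk_len - stride  # step back by `stride` to overlap

assert list(chunk_with_stride(b"abcdefgh", chunk_len=4, stride=2)) == [b"abcd", b"cdef", b"efgh"]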
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''', '''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''', } class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" A__ : List[str] = "roberta" def __init__( self : List[str] , _snake_case : Union[str, Any]=5_02_65 , _snake_case : List[Any]=7_68 , _snake_case : List[str]=12 , _snake_case : List[str]=12 , _snake_case : Any=30_72 , _snake_case : Union[str, Any]="gelu" , _snake_case : int=0.1 , _snake_case : Union[str, Any]=0.1 , _snake_case : Tuple=5_12 , _snake_case : Union[str, Any]=2 , _snake_case : Any=0.02 , _snake_case : Any=1E-12 , _snake_case : List[Any]=1 , _snake_case : int=0 , _snake_case : Any=2 , _snake_case : Optional[Any]="absolute" , _snake_case : int=True , _snake_case : Any=None , **_snake_case : Any , ): """simple docstring""" super().__init__(pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case ) A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = hidden_act A__ = intermediate_size A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_vocab_size A__ = initializer_range A__ = layer_norm_eps A__ = position_embedding_type A__ = use_cache A__ = classifier_dropout class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" @property def _a ( self : Dict ): """simple docstring""" if self.task == "multiple-choice": A__ = {0: 'batch', 1: 'choice', 2: 'sequence'} else: A__ = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
716
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''} SCREAMING_SNAKE_CASE__ = { '''vocab_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''', }, '''tokenizer_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''', }, } SCREAMING_SNAKE_CASE__ = { '''google/rembert''': 2_5_6, } SCREAMING_SNAKE_CASE__ = '''▁''' class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" A__ : Any = VOCAB_FILES_NAMES A__ : str = PRETRAINED_VOCAB_FILES_MAP A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ : int = RemBertTokenizer def __init__( self : Union[str, Any] , _snake_case : Any=None , _snake_case : Optional[Any]=None , _snake_case : Any=True , _snake_case : Optional[int]=True , _snake_case : Dict=False , _snake_case : Dict="[CLS]" , _snake_case : List[Any]="[SEP]" , _snake_case : Union[str, Any]="<unk>" , _snake_case : List[str]="[SEP]" , _snake_case : List[str]="<pad>" , _snake_case : str="[CLS]" , _snake_case : Any="[MASK]" , **_snake_case : Any , ): """simple docstring""" A__ = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else mask_token super().__init__( _snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , remove_space=_snake_case , keep_accents=_snake_case , bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , **_snake_case , ) A__ = do_lower_case A__ = remove_space A__ = keep_accents A__ = vocab_file A__ = False if not self.vocab_file else True def _a ( self : Any , _snake_case : List[int] , _snake_case : Optional[List[int]] = None ): """simple docstring""" A__ = [self.sep_token_id] A__ = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _a ( self : Tuple , _snake_case : List[int] , _snake_case : Optional[List[int]] = None , _snake_case : bool = False ): """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.' 
) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(_snake_case )) + [1] + ([0] * len(_snake_case )) + [1] return [1] + ([0] * len(_snake_case )) + [1] def _a ( self : Dict , _snake_case : List[int] , _snake_case : Optional[List[int]] = None ): """simple docstring""" A__ = [self.sep_token_id] A__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _a ( self : Any , _snake_case : str , _snake_case : Optional[str] = None ): """simple docstring""" if not os.path.isdir(_snake_case ): logger.error('Vocabulary path ({}) should be a directory'.format(_snake_case ) ) return A__ = os.path.join( _snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ): copyfile(self.vocab_file , _snake_case ) return (out_vocab_file,)
52
0
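# A sketch of `build_inputs_with_special_tokens` from the tokenizer above:
# single sequences become CLS + A + SEP, pairs become CLS + A + SEP + B + SEP.
from typing import List, Optional

CLS_ID, SEP_ID = 101, 102  # hypothetical ids for illustration

def build_inputs(ids_a: List[int], ids_b: Optional[List[int]] = None) -> List[int]:
    if ids_b is None:
        return [CLS_ID] + ids_a + [SEP_ID]
    return [CLS_ID] + ids_a + [SEP_ID] + ids_b + [SEP_ID]

assert build_inputs([7, 8]) == [101, 7, 8, 102]
assert build_inputs([7], [9]) == [101, 7, 102, 9, 102]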
from __future__ import annotations class __lowerCAmelCase : """simple docstring""" def __init__( self : str , _snake_case : str , _snake_case : str ): """simple docstring""" A__ , A__ = text, pattern A__ , A__ = len(_snake_case ), len(_snake_case ) def _a ( self : Optional[Any] , _snake_case : str ): """simple docstring""" for i in range(self.patLen - 1 , -1 , -1 ): if char == self.pattern[i]: return i return -1 def _a ( self : Any , _snake_case : int ): """simple docstring""" for i in range(self.patLen - 1 , -1 , -1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def _a ( self : List[Any] ): """simple docstring""" A__ = [] for i in range(self.textLen - self.patLen + 1 ): A__ = self.mismatch_in_text(_snake_case ) if mismatch_index == -1: positions.append(_snake_case ) else: A__ = self.match_in_pattern(self.text[mismatch_index] ) A__ = ( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions SCREAMING_SNAKE_CASE__ = '''ABAABA''' SCREAMING_SNAKE_CASE__ = '''AB''' SCREAMING_SNAKE_CASE__ = BoyerMooreSearch(text, pattern) SCREAMING_SNAKE_CASE__ = bms.bad_character_heuristic() if len(positions) == 0: print('''No match found''') else: print('''Pattern found in following positions: ''') print(positions)
717
import tempfile import unittest from make_student import create_student_by_copying_alternating_layers from transformers import AutoConfig from transformers.file_utils import cached_property from transformers.testing_utils import require_torch SCREAMING_SNAKE_CASE__ = '''sshleifer/bart-tiny-random''' SCREAMING_SNAKE_CASE__ = '''patrickvonplaten/t5-tiny-random''' @require_torch class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def _a ( self : Optional[int] ): """simple docstring""" return AutoConfig.from_pretrained(_snake_case ) def _a ( self : Optional[Any] ): """simple docstring""" A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.num_hidden_layers , 1 ) def _a ( self : Optional[int] ): """simple docstring""" A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=_snake_case ) def _a ( self : int ): """simple docstring""" A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=_snake_case ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers ) def _a ( self : str ): """simple docstring""" A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , 1 ) def _a ( self : str ): """simple docstring""" with self.assertRaises(_snake_case ): create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=_snake_case , d=_snake_case )
52
0
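# A sketch of the bad-character heuristic from the Boyer-Moore class above: on
# a mismatch, find the rightmost position of the offending character in the pattern.
def last_occurrence(pattern: str, char: str) -> int:
    for i in range(len(pattern) - 1, -1, -1):
        if pattern[i] == char:
            return i
    return -1  # character absent: the whole pattern can be shifted past it

assert last_occurrence("ABCAB", "A") == 3
assert last_occurrence("ABCAB", "Z") == -1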
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { '''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''', # See all LeViT models at https://huggingface.co/models?filter=levit } class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" A__ : Dict = "levit" def __init__( self : int , _snake_case : Optional[Any]=2_24 , _snake_case : Tuple=3 , _snake_case : Optional[int]=3 , _snake_case : str=2 , _snake_case : Union[str, Any]=1 , _snake_case : Any=16 , _snake_case : List[str]=[1_28, 2_56, 3_84] , _snake_case : Tuple=[4, 8, 12] , _snake_case : Dict=[4, 4, 4] , _snake_case : Optional[int]=[16, 16, 16] , _snake_case : List[Any]=0 , _snake_case : str=[2, 2, 2] , _snake_case : Optional[Any]=[2, 2, 2] , _snake_case : Dict=0.02 , **_snake_case : Union[str, Any] , ): """simple docstring""" super().__init__(**_snake_case ) A__ = image_size A__ = num_channels A__ = kernel_size A__ = stride A__ = padding A__ = hidden_sizes A__ = num_attention_heads A__ = depths A__ = key_dim A__ = drop_path_rate A__ = patch_size A__ = attention_ratio A__ = mlp_ratio A__ = initializer_range A__ = [ ['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" A__ : List[Any] = version.parse("1.11" ) @property def _a ( self : Union[str, Any] ): """simple docstring""" return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def _a ( self : List[Any] ): """simple docstring""" return 1E-4
718
from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" A__ : Union[str, Any] = ["image_processor", "tokenizer"] A__ : Optional[Any] = "BridgeTowerImageProcessor" A__ : List[Any] = ("RobertaTokenizer", "RobertaTokenizerFast") def __init__( self : List[Any] , _snake_case : Optional[Any] , _snake_case : Optional[int] ): """simple docstring""" super().__init__(_snake_case , _snake_case ) def __call__( self : List[Any] , _snake_case : int , _snake_case : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _snake_case : bool = True , _snake_case : Union[bool, str, PaddingStrategy] = False , _snake_case : Union[bool, str, TruncationStrategy] = None , _snake_case : Optional[int] = None , _snake_case : int = 0 , _snake_case : Optional[int] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[bool] = None , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = True , _snake_case : Optional[Union[str, TensorType]] = None , **_snake_case : Optional[int] , ): """simple docstring""" A__ = self.tokenizer( text=_snake_case , add_special_tokens=_snake_case , padding=_snake_case , truncation=_snake_case , max_length=_snake_case , stride=_snake_case , pad_to_multiple_of=_snake_case , return_token_type_ids=_snake_case , return_attention_mask=_snake_case , return_overflowing_tokens=_snake_case , return_special_tokens_mask=_snake_case , return_offsets_mapping=_snake_case , return_length=_snake_case , verbose=_snake_case , return_tensors=_snake_case , **_snake_case , ) # add pixel_values + pixel_mask A__ = self.image_processor( _snake_case , return_tensors=_snake_case , do_normalize=_snake_case , do_center_crop=_snake_case , **_snake_case ) encoding.update(_snake_case ) return encoding def _a ( self : Any , *_snake_case : Tuple , **_snake_case : List[Any] ): """simple docstring""" return self.tokenizer.batch_decode(*_snake_case , **_snake_case ) def _a ( self : Dict , *_snake_case : Dict , **_snake_case : List[str] ): """simple docstring""" return self.tokenizer.decode(*_snake_case , **_snake_case ) @property def _a ( self : Tuple ): """simple docstring""" A__ = self.tokenizer.model_input_names A__ = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
52
0
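# A sketch of the ordered de-duplication used for `model_input_names` in the
# processor above: `dict.fromkeys` keeps first occurrences in insertion order.
tokenizer_names = ["input_ids", "attention_mask"]
image_processor_names = ["pixel_values", "attention_mask"]
merged = list(dict.fromkeys(tokenizer_names + image_processor_names))
assert merged == ["input_ids", "attention_mask", "pixel_values"]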
'''simple docstring''' import inspect import unittest from transformers import ConvNextConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __lowerCAmelCase : """simple docstring""" def __init__( self : Union[str, Any] , _snake_case : str , _snake_case : str=13 , _snake_case : Union[str, Any]=32 , _snake_case : Tuple=3 , _snake_case : List[str]=4 , _snake_case : Optional[Any]=[10, 20, 30, 40] , _snake_case : List[str]=[2, 2, 3, 2] , _snake_case : Any=True , _snake_case : Optional[Any]=True , _snake_case : Union[str, Any]=37 , _snake_case : List[str]="gelu" , _snake_case : str=10 , _snake_case : int=0.02 , _snake_case : Optional[Any]=["stage2", "stage3", "stage4"] , _snake_case : Optional[int]=[2, 3, 4] , _snake_case : int=None , ): """simple docstring""" A__ = parent A__ = batch_size A__ = image_size A__ = num_channels A__ = num_stages A__ = hidden_sizes A__ = depths A__ = is_training A__ = use_labels A__ = intermediate_size A__ = hidden_act A__ = num_labels A__ = initializer_range A__ = out_features A__ = out_indices A__ = scope def _a ( self : List[Any] ): """simple docstring""" A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.num_labels ) A__ = self.get_config() return config, pixel_values, labels def _a ( self : Any ): """simple docstring""" return ConvNextConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_snake_case , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def _a ( self : Tuple , _snake_case : Any , _snake_case : Union[str, Any] , _snake_case : Union[str, Any] ): """simple docstring""" A__ = ConvNextModel(config=_snake_case ) model.to(_snake_case ) model.eval() A__ = model(_snake_case ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _a ( self : str , _snake_case : Optional[int] , _snake_case : int , _snake_case : List[str] ): """simple docstring""" A__ = ConvNextForImageClassification(_snake_case ) model.to(_snake_case ) model.eval() A__ = model(_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _a ( self : List[str] , _snake_case : Union[str, Any] , _snake_case : str , _snake_case : Dict ): """simple docstring""" A__ = ConvNextBackbone(config=_snake_case ) model.to(_snake_case ) model.eval() A__ = model(_snake_case ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) 
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None A__ = None A__ = ConvNextBackbone(config=_snake_case ) model.to(_snake_case ) model.eval() A__ = model(_snake_case ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def _a ( self : List[Any] ): """simple docstring""" A__ = self.prepare_config_and_inputs() A__ , A__ , A__ = config_and_inputs A__ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" A__ : Any = ( ( ConvNextModel, ConvNextForImageClassification, ConvNextBackbone, ) if is_torch_available() else () ) A__ : Optional[Any] = ( {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification} if is_torch_available() else {} ) A__ : Dict = True A__ : Optional[int] = False A__ : str = False A__ : Tuple = False A__ : List[str] = False def _a ( self : Optional[Any] ): """simple docstring""" A__ = ConvNextModelTester(self ) A__ = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=37 ) def _a ( self : List[Any] ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _a ( self : Dict ): """simple docstring""" return @unittest.skip(reason='ConvNext does not use inputs_embeds' ) def _a ( self : int ): """simple docstring""" pass @unittest.skip(reason='ConvNext does not support input and output embeddings' ) def _a ( self : int ): """simple docstring""" pass @unittest.skip(reason='ConvNext does not use feedforward chunking' ) def _a ( self : str ): """simple docstring""" pass def _a ( self : Optional[Any] ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(_snake_case ) A__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ['pixel_values'] self.assertListEqual(arg_names[:1] , _snake_case ) def _a ( self : str ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def _a ( self : int ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_snake_case ) def _a ( self : Tuple ): """simple docstring""" def check_hidden_states_output(_snake_case : int , _snake_case : int , _snake_case : Union[str, Any] ): A__ = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): A__ = 
model(**self._prepare_for_class(_snake_case , _snake_case ) ) A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A__ = self.model_tester.num_stages self.assertEqual(len(_snake_case ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = True check_hidden_states_output(_snake_case , _snake_case , _snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A__ = True check_hidden_states_output(_snake_case , _snake_case , _snake_case ) def _a ( self : Optional[Any] ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_snake_case ) @slow def _a ( self : str ): """simple docstring""" for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = ConvNextModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) def A ( ) -> Optional[Any]: A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def _a ( self : Optional[Any] ): """simple docstring""" return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' ) if is_vision_available() else None @slow def _a ( self : List[str] ): """simple docstring""" A__ = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224' ).to(_snake_case ) A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=_snake_case , return_tensors='pt' ).to(_snake_case ) # forward pass with torch.no_grad(): A__ = model(**_snake_case ) # verify the logits A__ = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , _snake_case ) A__ = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(_snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _snake_case , atol=1E-4 ) ) @require_torch class __lowerCAmelCase ( unittest.TestCase , UpperCAmelCase_ ): """simple docstring""" A__ : Dict = (ConvNextBackbone,) if is_torch_available() else () A__ : Tuple = ConvNextConfig A__ : Union[str, Any] = False def _a ( self : Union[str, Any] ): """simple docstring""" A__ = ConvNextModelTester(self )
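# A standalone sketch of the slow integration check above, assuming the
# 'facebook/convnext-tiny-224' checkpoint and the COCO fixture image referenced
# in this file are available; guarded so it only runs when executed directly.
if __name__ == "__main__":
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
    model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224")
    with torch.no_grad():
        logits = model(**processor(images=image, return_tensors="pt")).logits
    print(logits.shape)  # expected: torch.Size([1, 1000]) per the assertion in the test above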
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE__ = { '''configuration_xlm_roberta''': [ '''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMRobertaConfig''', '''XLMRobertaOnnxConfig''', ], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = ['''XLMRobertaTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = ['''XLMRobertaTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ '''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XLMRobertaForCausalLM''', '''XLMRobertaForMaskedLM''', '''XLMRobertaForMultipleChoice''', '''XLMRobertaForQuestionAnswering''', '''XLMRobertaForSequenceClassification''', '''XLMRobertaForTokenClassification''', '''XLMRobertaModel''', '''XLMRobertaPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ '''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXLMRobertaForCausalLM''', '''TFXLMRobertaForMaskedLM''', '''TFXLMRobertaForMultipleChoice''', '''TFXLMRobertaForQuestionAnswering''', '''TFXLMRobertaForSequenceClassification''', '''TFXLMRobertaForTokenClassification''', '''TFXLMRobertaModel''', '''TFXLMRobertaPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ '''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FlaxXLMRobertaForMaskedLM''', '''FlaxXLMRobertaForCausalLM''', '''FlaxXLMRobertaForMultipleChoice''', '''FlaxXLMRobertaForQuestionAnswering''', '''FlaxXLMRobertaForSequenceClassification''', '''FlaxXLMRobertaForTokenClassification''', '''FlaxXLMRobertaModel''', '''FlaxXLMRobertaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig, XLMRobertaOnnxConfig, ) try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta import XLMRobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, XLMRobertaForQuestionAnswering, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, XLMRobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm_roberta import ( TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMRobertaForCausalLM, TFXLMRobertaForMaskedLM, TFXLMRobertaForMultipleChoice, TFXLMRobertaForQuestionAnswering, TFXLMRobertaForSequenceClassification, 
TFXLMRobertaForTokenClassification, TFXLMRobertaModel, TFXLMRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xlm_roberta import ( FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxXLMRobertaForCausalLM, FlaxXLMRobertaForMaskedLM, FlaxXLMRobertaForMultipleChoice, FlaxXLMRobertaForQuestionAnswering, FlaxXLMRobertaForSequenceClassification, FlaxXLMRobertaForTokenClassification, FlaxXLMRobertaModel, FlaxXLMRobertaPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
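# A hedged usage sketch: with the lazy structure above, importing a symbol from
# transformers does not eagerly import the torch/tf/flax submodules — _LazyModule
# only materializes the relevant file on first attribute access. For example:
#
#   from transformers import XLMRobertaConfig   # resolves via _LazyModule.__getattr__
#   config = XLMRobertaConfig()                 # imports configuration_xlm_roberta only
#   print(config.vocab_size)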
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) def A ( __UpperCamelCase ) -> YolosConfig: A__ = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: A__ = 192 A__ = 768 A__ = 12 A__ = 3 A__ = [800, 1_333] A__ = False elif yolos_name == "yolos_s_dWr": A__ = 330 A__ = 14 A__ = 6 A__ = 1_320 elif "yolos_s" in yolos_name: A__ = 384 A__ = 1_536 A__ = 12 A__ = 6 elif "yolos_b" in yolos_name: A__ = [800, 1_344] A__ = 91 A__ = 'huggingface/label-files' A__ = 'coco-detection-id2label.json' A__ = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='dataset' ) , 'r' ) ) A__ = {int(__UpperCamelCase ): v for k, v in idalabel.items()} A__ = idalabel A__ = {v: k for k, v in idalabel.items()} return config def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False ) -> str: for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) A__ = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) A__ = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict A__ = in_proj_weight[: config.hidden_size, :] A__ = in_proj_bias[: config.hidden_size] A__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] A__ = in_proj_weight[-config.hidden_size :, :] A__ = in_proj_bias[-config.hidden_size :] def A ( __UpperCamelCase ) -> str: if "backbone" in name: A__ = name.replace('backbone' , 'vit' ) if "cls_token" in name: A__ = name.replace('cls_token' , 'embeddings.cls_token' ) if "det_token" in name: A__ = name.replace('det_token' , 'embeddings.detection_tokens' ) if "mid_pos_embed" in name: A__ = name.replace('mid_pos_embed' , 'encoder.mid_position_embeddings' ) if "pos_embed" in name: A__ = name.replace('pos_embed' , 'embeddings.position_embeddings' ) if "patch_embed.proj" in name: A__ = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "blocks" in name: A__ = name.replace('blocks' , 'encoder.layer' ) if "attn.proj" in name: A__ = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: A__ = name.replace('attn' , 'attention.self' ) if "norm1" in name: A__ = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: A__ = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: A__ = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: A__ = name.replace('mlp.fc2' , 'output.dense' ) if "class_embed" in name: A__ = name.replace('class_embed' , 'class_labels_classifier' ) if "bbox_embed" in name: A__ = name.replace('bbox_embed' , 'bbox_predictor' ) if "vit.norm" in name: A__ = name.replace('vit.norm' , 'vit.layernorm' ) return name def A ( __UpperCamelCase , __UpperCamelCase ) -> dict: for key in orig_state_dict.copy().keys(): A__ = orig_state_dict.pop(__UpperCamelCase ) if "qkv" in key: A__ = key.split('.' 
) A__ = int(key_split[2] ) A__ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: A__ = val[:dim, :] A__ = val[ dim : dim * 2, : ] A__ = val[-dim:, :] else: A__ = val[:dim] A__ = val[dim : dim * 2] A__ = val[-dim:] else: A__ = val return orig_state_dict def A ( ) -> torch.Tensor: A__ = 'http://images.cocodataset.org/val2017/000000039769.jpg' A__ = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw ) return im @torch.no_grad() def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False ) -> List[str]: A__ = get_yolos_config(__UpperCamelCase ) # load original state_dict A__ = torch.load(__UpperCamelCase , map_location='cpu' )['model'] # load 🤗 model A__ = YolosForObjectDetection(__UpperCamelCase ) model.eval() A__ = convert_state_dict(__UpperCamelCase , __UpperCamelCase ) model.load_state_dict(__UpperCamelCase ) # Check outputs on an image, prepared by YolosImageProcessor A__ = 800 if yolos_name != 'yolos_ti' else 512 A__ = YolosImageProcessor(format='coco_detection' , size=__UpperCamelCase ) A__ = image_processor(images=prepare_img() , return_tensors='pt' ) A__ = model(**__UpperCamelCase ) A__ , A__ = outputs.logits, outputs.pred_boxes A__ , A__ = None, None if yolos_name == "yolos_ti": A__ = torch.tensor( [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] ) A__ = torch.tensor( [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] ) elif yolos_name == "yolos_s_200_pre": A__ = torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] ) A__ = torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] ) elif yolos_name == "yolos_s_300_pre": A__ = torch.tensor( [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] ) A__ = torch.tensor( [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] ) elif yolos_name == "yolos_s_dWr": A__ = torch.tensor( [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] ) A__ = torch.tensor( [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] ) elif yolos_name == "yolos_base": A__ = torch.tensor( [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] ) A__ = torch.tensor( [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] ) else: raise ValueError(f'''Unknown yolos_name: {yolos_name}''' ) assert torch.allclose(logits[0, :3, :3] , __UpperCamelCase , atol=1E-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , __UpperCamelCase , atol=1E-4 ) Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase ) print(f'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__UpperCamelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__UpperCamelCase ) if push_to_hub: A__ = { 'yolos_ti': 'yolos-tiny', 'yolos_s_200_pre': 'yolos-small', 'yolos_s_300_pre': 'yolos-small-300', 'yolos_s_dWr': 'yolos-small-dwr', 'yolos_base': 'yolos-base', } print('Pushing to the hub...' 
) A__ = model_mapping[yolos_name] image_processor.push_to_hub(__UpperCamelCase , organization='hustvl' ) model.push_to_hub(__UpperCamelCase , organization='hustvl' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--yolos_name''', default='''yolos_s_200_pre''', type=str, help=( '''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\',''' ''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.''' ), ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) SCREAMING_SNAKE_CASE__ = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
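# A hedged invocation sketch for the converter above; the script filename and the
# checkpoint path are assumptions — the .pth file must be an original YOLOS state dict.
#
#   python convert_yolos_to_pytorch.py \
#       --yolos_name yolos_s_200_pre \
#       --checkpoint_path /path/to/yolos_s_200_pre.pth \
#       --pytorch_dump_folder_path ./yolos-small \
#       --push_to_hub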
import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def A ( __UpperCamelCase ) -> Tuple: if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[Any]: return max(metric_fn(__UpperCamelCase , __UpperCamelCase ) for gt in ground_truths ) def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[str]: A__ = [line.strip() for line in open(__UpperCamelCase , 'r' ).readlines()] A__ = [] if args.gold_data_mode == "qa": A__ = pd.read_csv(__UpperCamelCase , sep='\t' , header=__UpperCamelCase ) for answer_list in data[1]: A__ = ast.literal_eval(__UpperCamelCase ) answers.append(__UpperCamelCase ) else: A__ = [line.strip() for line in open(__UpperCamelCase , 'r' ).readlines()] A__ = [[reference] for reference in references] A__ = A__ = A__ = 0 for prediction, ground_truths in zip(__UpperCamelCase , __UpperCamelCase ): total += 1 em += metric_max_over_ground_truths(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) fa += metric_max_over_ground_truths(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) A__ = 100.0 * em / total A__ = 100.0 * fa / total logger.info(f'''F1: {fa:.2f}''' ) logger.info(f'''EM: {em:.2f}''' ) def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[int]: A__ = args.k A__ = [line.strip() for line in open(__UpperCamelCase , 'r' ).readlines()] A__ = [line.strip() for line in open(__UpperCamelCase , 'r' ).readlines()] A__ = A__ = 0 for hypo, reference in zip(__UpperCamelCase , __UpperCamelCase ): A__ = set(hypo.split('\t' )[:k] ) A__ = set(reference.split('\t' ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k A__ = 100.0 * em / total logger.info(f'''Precision@{k}: {em: .2f}''' ) def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]: def strip_title(__UpperCamelCase ): if title.startswith('"' ): A__ = title[1:] if title.endswith('"' ): A__ = title[:-1] return title A__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __UpperCamelCase , return_tensors='pt' , padding=__UpperCamelCase , truncation=__UpperCamelCase , )['input_ids'].to(args.device ) A__ = rag_model.rag.question_encoder(__UpperCamelCase ) A__ = question_enc_outputs[0] A__ = rag_model.retriever( __UpperCamelCase , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='pt' , ) A__ = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) A__ = [] for docs in all_docs: A__ = [strip_title(__UpperCamelCase ) for title in docs['title']] provenance_strings.append('\t'.join(__UpperCamelCase ) ) return provenance_strings def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]: with torch.no_grad(): A__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __UpperCamelCase , 
return_tensors='pt' , padding=__UpperCamelCase , truncation=__UpperCamelCase ) A__ = inputs_dict.input_ids.to(args.device ) A__ = inputs_dict.attention_mask.to(args.device ) A__ = rag_model.generate( # rag_model overwrites generate __UpperCamelCase , attention_mask=__UpperCamelCase , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__UpperCamelCase , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) A__ = rag_model.retriever.generator_tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase ) if args.print_predictions: for q, a in zip(__UpperCamelCase , __UpperCamelCase ): logger.info('Q: {} - A: {}'.format(__UpperCamelCase , __UpperCamelCase ) ) return answers def A ( ) -> Any: A__ = argparse.ArgumentParser() parser.add_argument( '--model_type' , choices=['rag_sequence', 'rag_token', 'bart'] , type=__UpperCamelCase , help=( 'RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the' ' model_name_or_path' ) , ) parser.add_argument( '--index_name' , default=__UpperCamelCase , choices=['exact', 'compressed', 'legacy'] , type=__UpperCamelCase , help='RAG model retriever type' , ) parser.add_argument( '--index_path' , default=__UpperCamelCase , type=__UpperCamelCase , help='Path to the retrieval index' , ) parser.add_argument('--n_docs' , default=5 , type=__UpperCamelCase , help='Number of retrieved docs' ) parser.add_argument( '--model_name_or_path' , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help='Path to pretrained checkpoints or model identifier from huggingface.co/models' , ) parser.add_argument( '--eval_mode' , choices=['e2e', 'retrieval'] , default='e2e' , type=__UpperCamelCase , help=( 'Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates' ' precision@k.' ) , ) parser.add_argument('--k' , default=1 , type=__UpperCamelCase , help='k for the precision@k calculation' ) parser.add_argument( '--evaluation_set' , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help='Path to a file containing evaluation samples' , ) parser.add_argument( '--gold_data_path' , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help='Path to a tab-separated file with gold samples' , ) parser.add_argument( '--gold_data_mode' , default='qa' , type=__UpperCamelCase , choices=['qa', 'ans'] , help=( 'Format of the gold data file' 'qa - a single line in the following format: question [tab] answer_list' 'ans - a single line of the gold file contains the expected answer string' ) , ) parser.add_argument( '--predictions_path' , type=__UpperCamelCase , default='predictions.txt' , help='Name of the predictions file, to be stored in the checkpoints directory' , ) parser.add_argument( '--eval_all_checkpoints' , action='store_true' , help='Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number' , ) parser.add_argument( '--eval_batch_size' , default=8 , type=__UpperCamelCase , help='Batch size per GPU/CPU for evaluation.' 
, ) parser.add_argument( '--recalculate' , help='Recalculate predictions even if the prediction file exists' , action='store_true' , ) parser.add_argument( '--num_beams' , default=4 , type=__UpperCamelCase , help='Number of beams to be used when generating answers' , ) parser.add_argument('--min_length' , default=1 , type=__UpperCamelCase , help='Min length of the generated answers' ) parser.add_argument('--max_length' , default=50 , type=__UpperCamelCase , help='Max length of the generated answers' ) parser.add_argument( '--print_predictions' , action='store_true' , help='If True, prints predictions while evaluating.' , ) parser.add_argument( '--print_docs' , action='store_true' , help='If True, prints docs retried while generating.' , ) A__ = parser.parse_args() A__ = torch.device('cuda' if torch.cuda.is_available() else 'cpu' ) return args def A ( __UpperCamelCase ) -> int: A__ = {} if args.model_type is None: A__ = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith('rag' ): A__ = RagTokenForGeneration if args.model_type == 'rag_token' else RagSequenceForGeneration A__ = args.n_docs if args.index_name is not None: A__ = args.index_name if args.index_path is not None: A__ = args.index_path else: A__ = BartForConditionalGeneration A__ = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info('Evaluate the following checkpoints: %s' , __UpperCamelCase ) A__ = get_scores if args.eval_mode == 'e2e' else get_precision_at_k A__ = evaluate_batch_eae if args.eval_mode == 'e2e' else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path ) ) score_fn(__UpperCamelCase , args.predictions_path , args.gold_data_path ) continue logger.info('***** Running evaluation for {} *****'.format(__UpperCamelCase ) ) logger.info(' Batch size = %d' , args.eval_batch_size ) logger.info(' Predictions will be stored under {}'.format(args.predictions_path ) ) if args.model_type.startswith('rag' ): A__ = RagRetriever.from_pretrained(__UpperCamelCase , **__UpperCamelCase ) A__ = model_class.from_pretrained(__UpperCamelCase , retriever=__UpperCamelCase , **__UpperCamelCase ) model.retriever.init_retrieval() else: A__ = model_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase ) model.to(args.device ) with open(args.evaluation_set , 'r' ) as eval_file, open(args.predictions_path , 'w' ) as preds_file: A__ = [] for line in tqdm(__UpperCamelCase ): questions.append(line.strip() ) if len(__UpperCamelCase ) == args.eval_batch_size: A__ = evaluate_batch_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) preds_file.write('\n'.join(__UpperCamelCase ) + '\n' ) preds_file.flush() A__ = [] if len(__UpperCamelCase ) > 0: A__ = evaluate_batch_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) preds_file.write('\n'.join(__UpperCamelCase ) ) preds_file.flush() score_fn(__UpperCamelCase , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = get_args() main(args)
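# A hedged end-to-end invocation sketch for the evaluator above; the script
# filename and the data paths are placeholders, the flags come from the argparse
# definitions in this file, and the checkpoint is the public RAG model.
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --model_type rag_sequence \
#       --eval_mode e2e \
#       --gold_data_mode qa \
#       --evaluation_set path/to/test.source \
#       --gold_data_path path/to/gold_data \
#       --predictions_path out/predictions.txt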
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
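# A quick sanity sketch for the helpers above: is_prime uses 6k +/- 1 trial
# division, and solution() walks the square-spiral diagonals until the prime
# ratio first drops below the threshold (Project Euler 58 uses the default 0.1).
if __name__ == "__main__":
    assert is_prime(2) and is_prime(97) and not is_prime(1)
    print(solution(0.5))  # a loose threshold that is already reached after a few rings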
import inspect import unittest from transformers import ViTHybridConfig from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class __lowerCAmelCase : """simple docstring""" def __init__( self : List[Any] , _snake_case : Any , _snake_case : Optional[int]=13 , _snake_case : Optional[Any]=64 , _snake_case : List[str]=2 , _snake_case : Any=3 , _snake_case : Union[str, Any]=True , _snake_case : Dict=True , _snake_case : int=32 , _snake_case : int=5 , _snake_case : Union[str, Any]=4 , _snake_case : int=37 , _snake_case : Tuple="gelu" , _snake_case : Optional[int]=0.1 , _snake_case : Dict=0.1 , _snake_case : List[str]=10 , _snake_case : Union[str, Any]=0.02 , _snake_case : Dict=[1, 16, 4, 4] , _snake_case : Dict=None , ): """simple docstring""" A__ = parent A__ = batch_size A__ = image_size A__ = patch_size A__ = num_channels A__ = is_training A__ = use_labels A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = type_sequence_label_size A__ = initializer_range A__ = scope A__ = backbone_featmap_shape # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) # the number of patches is based on the feature map of the backbone, which by default uses an output stride # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size A__ = (self.image_size // 32) ** 2 A__ = num_patches + 1 def _a ( self : Any ): """simple docstring""" A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A__ = self.get_config() return config, pixel_values, labels def _a ( self : Tuple ): """simple docstring""" A__ = { 'global_padding': 'same', 'layer_type': 'bottleneck', 'depths': [3, 4, 9], 'out_features': ['stage1', 'stage2', 'stage3'], 'embedding_dynamic_padding': True, 'hidden_sizes': [4, 8, 16, 32], 'num_groups': 2, } return ViTHybridConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_snake_case , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=_snake_case , ) def _a ( self : int , _snake_case : Optional[int] , _snake_case : Union[str, Any] , _snake_case : Optional[int] ): """simple docstring""" A__ = ViTHybridModel(config=_snake_case ) model.to(_snake_case ) model.eval() A__ = model(_snake_case ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : List[str] , _snake_case : str , _snake_case : Union[str, Any] , _snake_case : Any ): """simple docstring""" A__ = self.type_sequence_label_size A__ = ViTHybridForImageClassification(_snake_case ) model.to(_snake_case ) model.eval() A__ = model(_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _a ( self : Dict ): """simple docstring""" A__ = self.prepare_config_and_inputs() A__ , A__ , A__ = config_and_inputs A__ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" A__ : Union[str, Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () A__ : str = ( {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification} if is_torch_available() else {} ) A__ : Union[str, Any] = False A__ : Any = False A__ : Union[str, Any] = False def _a ( self : Dict ): """simple docstring""" A__ = ViTHybridModelTester(self ) A__ = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=37 ) def _a ( self : int ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='ViT does not use inputs_embeds' ) def _a ( self : int ): """simple docstring""" pass def _a ( self : int ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(_snake_case ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_snake_case , nn.Linear ) ) def _a ( self : List[str] ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(_snake_case ) A__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ['pixel_values'] self.assertListEqual(arg_names[:1] , _snake_case ) def _a ( self : Any ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def _a ( self : str ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_snake_case ) def _a ( self : Any ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = _config_zero_init(_snake_case ) for model_class in self.all_model_classes: A__ = model_class(config=_snake_case ) # Skip the check for the backbone for name, module in model.named_modules(): if module.__class__.__name__ == "ViTHybridPatchEmbeddings": A__ = [F'''{name}.{key}''' for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @slow def _a ( self : int ): """simple docstring""" for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = ViTHybridModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) def A ( ) 
-> Union[str, Any]: A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def _a ( self : Tuple ): """simple docstring""" return ( ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _a ( self : Optional[Any] ): """simple docstring""" A__ = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( _snake_case ) A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=_snake_case , return_tensors='pt' ).to(_snake_case ) # forward pass with torch.no_grad(): A__ = model(**_snake_case ) # verify the logits A__ = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , _snake_case ) A__ = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(_snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _snake_case , atol=1E-4 ) ) @slow @require_accelerate def _a ( self : List[Any] ): """simple docstring""" A__ = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' ) A__ = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' ) A__ = prepare_img() A__ = image_processor(images=_snake_case , return_tensors='pt' ) A__ = model(**_snake_case ) A__ = outputs.logits # model predicts one of the 1000 ImageNet classes A__ = logits.argmax(-1 ).item() self.assertTrue(model.config.idalabel[predicted_class_idx] , 'tabby, tabby cat' )
from math import factorial

DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")
    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")
    # Convert the number to a string to iterate over its digits and sum their factorials.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item,
        # or a length greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If the chain contains the exact amount of elements, increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
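# A small sanity sketch for the functions above: 145 is a fixed point of the
# digit-factorial map (1! + 4! + 5! = 145), so its chain has length 1.
if __name__ == "__main__":
    assert digit_factorial_sum(145) == 145
    # Project Euler 74: count chains of exactly sixty non-repeating terms
    # starting below one million (this takes a little while to run).
    print(solution(60, 1_000_000))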
def is_even(number: int) -> bool:
    """Return True if the given integer is even, using a bitwise AND with 1."""
    return number & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
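# A trivial check sketch for the bitwise parity helper above: `n & 1` isolates
# the least-significant bit, which is 0 exactly for even integers.
if __name__ == "__main__":
    assert is_even(0) and is_even(10) and not is_even(7)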
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import Dict

from .base import GenericTensor, Pipeline


class FeatureExtractionPipeline(Pipeline):
    """
    Feature extraction pipeline: runs the base transformer and returns its raw
    hidden states, with no task-specific head on top.
    """

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # model_outputs[0] is the first available tensor (the last hidden state)
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
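# A hedged usage sketch for the pipeline above; the checkpoint name is an
# assumption — any encoder checkpoint works for feature extraction.
#
#   from transformers import pipeline
#
#   extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#   features = extractor("This is a test")  # nested lists of shape (1, num_tokens, hidden_size)
#   print(len(features[0]), len(features[0][0]))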
import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class __lowerCAmelCase ( unittest.TestCase ): A__ : Optional[int] = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def _a ( self : List[Any] , _snake_case : List[str] , _snake_case : Tuple , _snake_case : Union[str, Any] ): """simple docstring""" A__ = hf_hub_download( repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' ) A__ = VideoClassificationPipeline(model=_snake_case , image_processor=_snake_case , top_k=2 ) A__ = [ example_video_filepath, 'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4', ] return video_classifier, examples def _a ( self : List[str] , _snake_case : Optional[Any] , _snake_case : Dict ): """simple docstring""" for example in examples: A__ = video_classifier(_snake_case ) self.assertEqual( _snake_case , [ {'score': ANY(_snake_case ), 'label': ANY(_snake_case )}, {'score': ANY(_snake_case ), 'label': ANY(_snake_case )}, ] , ) @require_torch def _a ( self : Dict ): """simple docstring""" A__ = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification' A__ = VideoMAEFeatureExtractor( size={'shortest_edge': 10} , crop_size={'height': 10, 'width': 10} ) A__ = pipeline( 'video-classification' , model=_snake_case , feature_extractor=_snake_case , frame_sampling_rate=4 ) A__ = hf_hub_download(repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' ) A__ = video_classifier(_snake_case , top_k=2 ) self.assertEqual( nested_simplify(_snake_case , decimals=4 ) , [{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}] , ) A__ = video_classifier( [ video_file_path, video_file_path, ] , top_k=2 , ) self.assertEqual( nested_simplify(_snake_case , decimals=4 ) , [ [{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}], [{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}], ] , ) @require_tf def _a ( self : List[Any] ): """simple docstring""" pass
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__) def A ( __UpperCamelCase , __UpperCamelCase ) -> List[Any]: return (preds == labels).mean() @dataclass class __lowerCAmelCase : """simple docstring""" A__ : str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) A__ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) A__ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) A__ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) @dataclass class __lowerCAmelCase : """simple docstring""" A__ : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} ) A__ : str = field(metadata={"help": "Should contain the data files for the task."} ) A__ : int = field( default=1_28 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) A__ : bool = field( default=UpperCAmelCase_ , metadata={"help": "Overwrite the cached training and evaluation sets"} ) def A ( ) -> Any: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) A__ , A__ , A__ = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' ' --overwrite_output_dir to overcome.' 
) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , __UpperCamelCase ) # Set seed set_seed(training_args.seed ) try: A__ = processors[data_args.task_name]() A__ = processor.get_labels() A__ = len(__UpperCamelCase ) except KeyError: raise ValueError('Task not found: %s' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. A__ = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__UpperCamelCase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) A__ = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) A__ = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__UpperCamelCase , cache_dir=model_args.cache_dir , ) # Get datasets A__ = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=__UpperCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) A__ = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=__UpperCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(__UpperCamelCase ) -> Dict: A__ = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(__UpperCamelCase , p.label_ids )} # Data collator A__ = DataCollatorWithPadding(__UpperCamelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer A__ = Trainer( model=__UpperCamelCase , args=__UpperCamelCase , train_dataset=__UpperCamelCase , eval_dataset=__UpperCamelCase , compute_metrics=__UpperCamelCase , data_collator=__UpperCamelCase , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation A__ = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) A__ = trainer.evaluate() A__ = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_master(): with open(__UpperCamelCase , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): 
logger.info(' %s = %s' , __UpperCamelCase , __UpperCamelCase ) writer.write('%s = %s\n' % (key, value) ) results.update(__UpperCamelCase ) return results def A ( __UpperCamelCase ) -> List[Any]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
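# A hedged launch sketch for the trainer above; the script filename, task name,
# and data directory are assumptions (valid task names come from the processors
# dict in utils_multiple_choice), and the flags map onto the dataclass fields
# parsed by HfArgumentParser in main().
#
#   python run_multiple_choice.py \
#       --model_name_or_path bert-base-uncased \
#       --task_name swag \
#       --data_dir ./data/swag \
#       --output_dir ./out/swag_bert \
#       --max_seq_length 128 \
#       --do_train --do_eval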
import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" A__ : Union[str, Any] = (UnCLIPScheduler,) def _a ( self : List[str] , **_snake_case : Union[str, Any] ): """simple docstring""" A__ = { 'num_train_timesteps': 10_00, 'variance_type': 'fixed_small_log', 'clip_sample': True, 'clip_sample_range': 1.0, 'prediction_type': 'epsilon', } config.update(**_snake_case ) return config def _a ( self : List[str] ): """simple docstring""" for timesteps in [1, 5, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=_snake_case ) def _a ( self : List[str] ): """simple docstring""" for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=_snake_case ) def _a ( self : str ): """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=_snake_case ) def _a ( self : Union[str, Any] ): """simple docstring""" for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=_snake_case ) def _a ( self : List[Any] ): """simple docstring""" for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=_snake_case ) def _a ( self : str ): """simple docstring""" for time_step in [0, 5_00, 9_99]: for prev_timestep in [None, 5, 1_00, 2_50, 5_00, 7_50]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=_snake_case , prev_timestep=_snake_case ) def _a ( self : Any ): """simple docstring""" A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(variance_type='fixed_small_log' ) A__ = scheduler_class(**_snake_case ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.00_00E-10 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.054_9625 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.999_4987 ) ) < 1E-5 def _a ( self : Union[str, Any] ): """simple docstring""" A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(variance_type='learned_range' ) A__ = scheduler_class(**_snake_case ) A__ = 0.5 assert scheduler._get_variance(1 , predicted_variance=_snake_case ) - -10.171_2790 < 1E-5 assert scheduler._get_variance(4_87 , predicted_variance=_snake_case ) - -5.799_8052 < 1E-5 assert scheduler._get_variance(9_99 , predicted_variance=_snake_case ) - -0.001_0011 < 1E-5 def _a ( self : Union[str, Any] ): """simple docstring""" A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config() A__ = scheduler_class(**_snake_case ) A__ = scheduler.timesteps A__ = self.dummy_model() A__ = self.dummy_sample_deter A__ = torch.manual_seed(0 ) for i, t in enumerate(_snake_case ): # 1. predict noise residual A__ = model(_snake_case , _snake_case ) # 2. predict previous mean of sample x_t-1 A__ = scheduler.step(_snake_case , _snake_case , _snake_case , generator=_snake_case ).prev_sample A__ = pred_prev_sample A__ = torch.sum(torch.abs(_snake_case ) ) A__ = torch.mean(torch.abs(_snake_case ) ) assert abs(result_sum.item() - 252.268_2495 ) < 1E-2 assert abs(result_mean.item() - 0.328_4743 ) < 1E-3 def _a ( self : Optional[int] ): """simple docstring""" A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config() A__ = scheduler_class(**_snake_case ) scheduler.set_timesteps(25 ) A__ = scheduler.timesteps A__ = self.dummy_model() A__ = self.dummy_sample_deter A__ = torch.manual_seed(0 ) for i, t in enumerate(_snake_case ): # 1. 
predict noise residual A__ = model(_snake_case , _snake_case ) if i + 1 == timesteps.shape[0]: A__ = None else: A__ = timesteps[i + 1] # 2. predict previous mean of sample x_t-1 A__ = scheduler.step( _snake_case , _snake_case , _snake_case , prev_timestep=_snake_case , generator=_snake_case ).prev_sample A__ = pred_prev_sample A__ = torch.sum(torch.abs(_snake_case ) ) A__ = torch.mean(torch.abs(_snake_case ) ) assert abs(result_sum.item() - 258.204_4983 ) < 1E-2 assert abs(result_mean.item() - 0.336_2038 ) < 1E-3 def _a ( self : str ): """simple docstring""" pass def _a ( self : List[Any] ): """simple docstring""" pass
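# A standalone sketch of the fixed_small_log variance check above, outside the
# unittest harness; the printed value mirrors the assertion for timestep 487.
if __name__ == "__main__":
    scheduler = UnCLIPScheduler(num_train_timesteps=1000, variance_type="fixed_small_log")
    print(scheduler._get_variance(487))  # ~0.0549625 per the assertion above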
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
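# A minimal sketch of the local binary pattern idea the last test exercises,
# using one common 8-neighbor formulation; the project's own get_neighbors_pixel
# and local_binary_value helpers may order or weight the bits differently.
import numpy as np

patch = np.array([[5, 9, 1], [4, 6, 7], [2, 3, 8]])
center = patch[1, 1]
# clockwise neighbors starting at the top-left corner
neighbors = [patch[0, 0], patch[0, 1], patch[0, 2], patch[1, 2],
             patch[2, 2], patch[2, 1], patch[2, 0], patch[1, 0]]
bits = [int(p >= center) for p in neighbors]          # threshold against the center
lbp_value = sum(bit << i for i, bit in enumerate(bits))  # pack bits into an int
print(lbp_value)  # 26 for this patch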
704
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))

    print("Googling.....")

    url = f"https://www.google.com/search?q={query}&num=100"

    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )

    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]

    webbrowser.open(link)
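# A self-contained sketch of the selector fallback used above, run against a
# synthetic HTML string instead of a live Google response. The "yuRUbf" and
# "kCrYT" class names come from the script itself; SAMPLE_HTML and the "q"
# query parameter are invented here for illustration only.
from urllib.parse import parse_qs

from bs4 import BeautifulSoup

SAMPLE_HTML = '<div class="kCrYT"><a href="/url?q=https%3A%2F%2Fexample.com&sa=U">x</a></div>'

soup = BeautifulSoup(SAMPLE_HTML, "html.parser")
try:
    # Desktop-style result block: the href is already a direct link.
    link = soup.find("div", attrs={"class": "yuRUbf"}).find("a").get("href")
except AttributeError:
    # Fallback block: the real URL is packed into a query parameter.
    href = soup.find("div", attrs={"class": "kCrYT"}).find("a").get("href")
    link = parse_qs(href.split("?", 1)[1])["q"][0]

print(link)  # -> https://example.com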
52
0
from typing import Optional

from torch import nn

from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    """Dual transformer wrapper that combines two `Transformer2DModel`s for mixed inference."""

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                hidden_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
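# A small numeric sketch (illustration only) of the blending rule in the
# forward pass above: each transformer's residual is weighted by mix_ratio and
# the input is added back. The tensors below are toy stand-ins, not real
# transformer outputs.
import torch

mix_ratio = 0.5
input_states = torch.zeros(1, 4)
residual_0 = torch.full((1, 4), 2.0)  # stand-in residual for condition 0
residual_1 = torch.full((1, 4), 4.0)  # stand-in residual for condition 1

output = residual_0 * mix_ratio + residual_1 * (1 - mix_ratio) + input_states
print(output)  # tensor([[3., 3., 3., 3.]]) for an even 0.5 mix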
705
import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" A__ : Any = IFInpaintingPipeline A__ : Dict = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"} A__ : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS A__ : Dict = PipelineTesterMixin.required_optional_params - {"latents"} def _a ( self : Any ): """simple docstring""" return self._get_dummy_components() def _a ( self : Optional[int] , _snake_case : Any , _snake_case : str=0 ): """simple docstring""" if str(_snake_case ).startswith('mps' ): A__ = torch.manual_seed(_snake_case ) else: A__ = torch.Generator(device=_snake_case ).manual_seed(_snake_case ) A__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case ) A__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case ) A__ = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def _a ( self : Dict ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def _a ( self : int ): """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def _a ( self : Optional[int] ): """simple docstring""" super().test_save_load_floataa(expected_max_diff=1E-1 ) def _a ( self : List[str] ): """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def _a ( self : Dict ): """simple docstring""" self._test_save_load_local() def _a ( self : Optional[int] ): """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
52
0
706
import inspect import jax import jax.lax as lax import jax.numpy as jnp from ..utils import add_start_docstrings from ..utils.logging import get_logger SCREAMING_SNAKE_CASE__ = get_logger(__name__) SCREAMING_SNAKE_CASE__ = r''' Args: input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs (`Dict[str, Any]`, *optional*): Additional logits processor specific kwargs. Return: `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores. ''' class __lowerCAmelCase : """simple docstring""" @add_start_docstrings(_snake_case ) def __call__( self : Optional[int] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray ): """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class __lowerCAmelCase : """simple docstring""" @add_start_docstrings(_snake_case ) def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray ): """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" @add_start_docstrings(_snake_case ) def __call__( self : Any , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int , **_snake_case : Optional[int] ): """simple docstring""" for processor in self: A__ = inspect.signature(processor.__call__ ).parameters if len(_snake_case ) > 3: if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ): raise ValueError( F'''Make sure that all the required parameters: {list(function_args.keys() )} for ''' F'''{processor.__class__} are passed to the logits processor.''' ) A__ = processor(_snake_case , _snake_case , _snake_case , **_snake_case ) else: A__ = processor(_snake_case , _snake_case , _snake_case ) return scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Any , _snake_case : float ): """simple docstring""" if not isinstance(_snake_case , _snake_case ) or not (temperature > 0): raise ValueError(F'''`temperature` has to be a strictly positive float, but is {temperature}''' ) A__ = temperature def __call__( self : str , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ): """simple docstring""" A__ = scores / self.temperature return scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Optional[Any] , _snake_case : float , _snake_case : float = -float('Inf' ) , _snake_case : int = 1 ): """simple docstring""" if not isinstance(_snake_case , _snake_case ) or (top_p < 0 or top_p > 1.0): raise ValueError(F'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' ) if not isinstance(_snake_case , _snake_case ) or (min_tokens_to_keep < 1): raise ValueError(F'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' ) A__ = top_p A__ = filter_value A__ = min_tokens_to_keep def __call__( self : str , _snake_case : jnp.ndarray , _snake_case 
: jnp.ndarray , _snake_case : int ): """simple docstring""" A__ , A__ = lax.top_k(_snake_case , scores.shape[-1] ) A__ = jnp.full_like(_snake_case , self.filter_value ) A__ = jax.nn.softmax(_snake_case , axis=-1 ).cumsum(axis=-1 ) A__ = cumulative_probs < self.top_p # include the token that is higher than top_p as well A__ = jnp.roll(_snake_case , 1 ) score_mask |= score_mask.at[:, 0].set(_snake_case ) # min tokens to keep A__ = score_mask.at[:, : self.min_tokens_to_keep].set(_snake_case ) A__ = jnp.where(_snake_case , _snake_case , _snake_case ) A__ = jax.lax.sort_key_val(_snake_case , _snake_case )[-1] return next_scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Union[str, Any] , _snake_case : int , _snake_case : float = -float('Inf' ) , _snake_case : int = 1 ): """simple docstring""" if not isinstance(_snake_case , _snake_case ) or top_k <= 0: raise ValueError(F'''`top_k` has to be a strictly positive integer, but is {top_k}''' ) A__ = max(_snake_case , _snake_case ) A__ = filter_value def __call__( self : Optional[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ): """simple docstring""" A__ , A__ = scores.shape A__ = jnp.full(batch_size * vocab_size , self.filter_value ) A__ = min(self.top_k , scores.shape[-1] ) # Safety check A__ , A__ = lax.top_k(_snake_case , _snake_case ) A__ = jnp.broadcast_to((jnp.arange(_snake_case ) * vocab_size)[:, None] , (batch_size, topk) ).flatten() A__ = topk_scores.flatten() A__ = topk_indices.flatten() + shift A__ = next_scores_flat.at[topk_indices_flat].set(_snake_case ) A__ = next_scores_flat.reshape(_snake_case , _snake_case ) return next_scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Any , _snake_case : int ): """simple docstring""" A__ = bos_token_id def __call__( self : Optional[int] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ): """simple docstring""" A__ = jnp.full(scores.shape , -float('inf' ) ) A__ = 1 - jnp.bool_(cur_len - 1 ) A__ = jnp.where(_snake_case , new_scores.at[:, self.bos_token_id].set(0 ) , _snake_case ) return scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Any , _snake_case : int , _snake_case : int ): """simple docstring""" A__ = max_length A__ = eos_token_id def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ): """simple docstring""" A__ = jnp.full(scores.shape , -float('inf' ) ) A__ = 1 - jnp.bool_(cur_len - self.max_length + 1 ) A__ = jnp.where(_snake_case , new_scores.at[:, self.eos_token_id].set(0 ) , _snake_case ) return scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Dict , _snake_case : int , _snake_case : int ): """simple docstring""" if not isinstance(_snake_case , _snake_case ) or min_length < 0: raise ValueError(F'''`min_length` has to be a positive integer, but is {min_length}''' ) if not isinstance(_snake_case , _snake_case ) or eos_token_id < 0: raise ValueError(F'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' ) A__ = min_length A__ = eos_token_id def __call__( self : int , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ): """simple docstring""" A__ = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 ) A__ = jnp.where(_snake_case , scores.at[:, self.eos_token_id].set(-float('inf' ) ) , _snake_case ) return scores class __lowerCAmelCase ( UpperCAmelCase_ ): 
"""simple docstring""" def __init__( self : int , _snake_case : Tuple , _snake_case : Union[str, Any] ): """simple docstring""" A__ = list(_snake_case ) A__ = begin_index def __call__( self : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : str , _snake_case : int ): """simple docstring""" A__ = 1 - jnp.bool_(cur_len - self.begin_index ) A__ = jnp.where(_snake_case , scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) , _snake_case ) return scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : int , _snake_case : list ): """simple docstring""" A__ = list(_snake_case ) def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ): """simple docstring""" A__ = scores.at[..., self.suppress_tokens].set(-float('inf' ) ) return scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : List[str] , _snake_case : Optional[Any] ): """simple docstring""" A__ = dict(_snake_case ) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have a negative value. A__ = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1 for index, token in force_token_map.items(): if token is not None: A__ = force_token_array.at[index].set(_snake_case ) A__ = jnp.intaa(_snake_case ) def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ): """simple docstring""" def _force_token(_snake_case : Dict ): A__ = scores.shape[0] A__ = self.force_token_array[generation_idx] A__ = jnp.ones_like(_snake_case , dtype=scores.dtype ) * -float('inf' ) A__ = jnp.zeros((batch_size, 1) , dtype=scores.dtype ) A__ = lax.dynamic_update_slice(_snake_case , _snake_case , (0, current_token) ) return new_scores A__ = lax.cond( cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond( self.force_token_array[cur_len] >= 0 , lambda: _force_token(_snake_case ) , lambda: scores , ) , ) return scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : Dict , _snake_case : List[Any] ): """simple docstring""" A__ = generate_config.eos_token_id A__ = generate_config.no_timestamps_token_id A__ = generate_config.no_timestamps_token_id + 1 A__ = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(_snake_case , 'max_initial_timestamp_index' ): A__ = generate_config.max_initial_timestamp_index else: A__ = model_config.vocab_size if self.max_initial_timestamp_index is None: A__ = model_config.vocab_size def __call__( self : Tuple , _snake_case : List[Any] , _snake_case : Dict , _snake_case : Dict ): """simple docstring""" A__ = scores.at[:, self.no_timestamps_token_id].set(-float('inf' ) ) def handle_pairs(_snake_case : Dict , _snake_case : str ): A__ = jnp.where((cur_len - self.begin_index) >= 1 , _snake_case , _snake_case ) A__ = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , _snake_case , ) A__ = jnp.where((cur_len - self.begin_index) < 2 , _snake_case , _snake_case ) A__ = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin , _snake_case , _snake_case , ) return jnp.where( _snake_case , jnp.where( 
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('inf' ) ) , scores_k.at[: self.eos_token_id].set(-float('inf' ) ) , ) , _snake_case , ) A__ = jax.vmap(_snake_case )(_snake_case , _snake_case ) A__ = jnp.where(cur_len == self.begin_index , _snake_case , _snake_case ) A__ = jnp.where( self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , _snake_case , ) A__ = self.timestamp_begin + self.max_initial_timestamp_index A__ = jnp.where( _snake_case , scores.at[:, last_allowed + 1 :].set(-float('inf' ) ) , _snake_case , ) # if sum of probability over timestamps is above any other token, sample timestamp A__ = jax.nn.log_softmax(_snake_case , axis=-1 ) def handle_cumulative_probs(_snake_case : List[Any] , _snake_case : Union[str, Any] ): A__ = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 ) A__ = jnp.max(logprobs_k[: self.timestamp_begin] ) return jnp.where( timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('inf' ) ) , _snake_case , ) A__ = jax.vmap(_snake_case )(_snake_case , _snake_case ) return scores
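# A standalone sketch of the nucleus (top-p) masking step implemented by the
# top-p warper above, on a toy score vector. This mirrors only the masking
# logic; the full processor also scatters the scores back to vocabulary order.
import jax
import jax.numpy as jnp

scores = jnp.log(jnp.array([[0.5, 0.3, 0.1, 0.1]]))
top_p, filter_value = 0.8, -float("inf")

topk_scores, topk_indices = jax.lax.top_k(scores, scores.shape[-1])
cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
score_mask = cumulative_probs < top_p
# shift right and force the first token so the token crossing top_p is kept too
score_mask = jnp.roll(score_mask, 1, axis=-1).at[:, 0].set(True)
masked = jnp.where(score_mask, topk_scores, filter_value)
print(masked)  # only the tokens covering ~0.8 probability mass survive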
52
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
707
import argparse
import struct
import unittest


class SHA256:
    """Pure-Python implementation of the SHA-256 hashing algorithm."""

    def __init__(self, data: bytes) -> None:
        self.data = data

        # Initialize hash values
        self.hashes = [
            0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
            0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
        ]

        # Initialize round constants
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5,
            0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3,
            0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC,
            0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7,
            0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13,
            0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3,
            0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5,
            0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208,
            0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]

        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )

                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000

                # Compression
                S1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + S1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                S0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (S0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit integer by the given number of bits."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)


class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")

    args = parser.parse_args()

    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
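# Quick sanity check (sketch): the pure-Python SHA256 class above should agree
# with the standard library on arbitrary byte strings.
import hashlib

for msg in (b"", b"abc", b"Hello World!! Welcome to Cryptography"):
    assert SHA256(msg).hash == hashlib.sha256(msg).hexdigest()
print("SHA256 matches hashlib on all test inputs")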
52
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm"] = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm"] = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]


if TYPE_CHECKING:
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
708
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid function of a float, or its derivative if deriv is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Return the value found after the forward propagation training."""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
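# Worked checks for the functions above. The first two are deterministic: the
# logistic function at 0 is exactly 0.5, and the derivative form value*(1-value)
# peaks at 0.25. The training run is stochastic (random initial weight), so it
# is printed with a hedged expectation rather than asserted.
import random

assert sigmoid_function(0) == 0.5
assert sigmoid_function(0.5, deriv=True) == 0.25

random.seed(0)  # make the random initial weight reproducible
result = forward_propagation(32, 450_000)
print(result)  # typically within about 1 of the target 32 after convergence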
52
0
import inspect import unittest from transformers import SegformerConfig, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_MAPPING, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerModel, ) from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import SegformerImageProcessor class __lowerCAmelCase ( UpperCAmelCase_): """simple docstring""" def _a ( self : Optional[int] ): """simple docstring""" A__ = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(_snake_case , 'hidden_sizes' ) ) self.parent.assertTrue(hasattr(_snake_case , 'num_attention_heads' ) ) self.parent.assertTrue(hasattr(_snake_case , 'num_encoder_blocks' ) ) class __lowerCAmelCase : """simple docstring""" def __init__( self : List[Any] , _snake_case : Union[str, Any] , _snake_case : Union[str, Any]=13 , _snake_case : Union[str, Any]=64 , _snake_case : int=3 , _snake_case : Dict=4 , _snake_case : Dict=[2, 2, 2, 2] , _snake_case : Dict=[8, 4, 2, 1] , _snake_case : Any=[16, 32, 64, 1_28] , _snake_case : int=[1, 4, 8, 16] , _snake_case : Any=[1, 2, 4, 8] , _snake_case : Union[str, Any]=True , _snake_case : List[Any]=True , _snake_case : Any="gelu" , _snake_case : str=0.1 , _snake_case : Any=0.1 , _snake_case : Optional[int]=0.02 , _snake_case : str=3 , _snake_case : str=None , ): """simple docstring""" A__ = parent A__ = batch_size A__ = image_size A__ = num_channels A__ = num_encoder_blocks A__ = sr_ratios A__ = depths A__ = hidden_sizes A__ = downsampling_rates A__ = num_attention_heads A__ = is_training A__ = use_labels A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = initializer_range A__ = num_labels A__ = scope def _a ( self : Optional[Any] ): """simple docstring""" A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) A__ = self.get_config() return config, pixel_values, labels def _a ( self : List[Any] ): """simple docstring""" return SegformerConfig( image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def _a ( self : Union[str, Any] , _snake_case : List[Any] , _snake_case : Any , _snake_case : Union[str, Any] ): """simple docstring""" A__ = SegformerModel(config=_snake_case ) model.to(_snake_case ) model.eval() A__ = model(_snake_case ) A__ = A__ = self.image_size // (self.downsampling_rates[-1] * 2) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) ) def _a ( self : int , _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : Optional[int] ): """simple docstring""" A__ = 
self.num_labels A__ = SegformerForSemanticSegmentation(_snake_case ) model.to(_snake_case ) model.eval() A__ = model(_snake_case ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) A__ = model(_snake_case , labels=_snake_case ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) self.parent.assertGreater(result.loss , 0.0 ) def _a ( self : int , _snake_case : Union[str, Any] , _snake_case : Tuple , _snake_case : Any ): """simple docstring""" A__ = 1 A__ = SegformerForSemanticSegmentation(config=_snake_case ) model.to(_snake_case ) model.eval() A__ = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(_snake_case ) A__ = model(_snake_case , labels=_snake_case ) self.parent.assertGreater(result.loss , 0.0 ) def _a ( self : Optional[int] ): """simple docstring""" A__ = self.prepare_config_and_inputs() A__ , A__ , A__ = config_and_inputs A__ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase): """simple docstring""" A__ : Dict = ( ( SegformerModel, SegformerForSemanticSegmentation, SegformerForImageClassification, ) if is_torch_available() else () ) A__ : str = ( { "feature-extraction": SegformerModel, "image-classification": SegformerForImageClassification, "image-segmentation": SegformerForSemanticSegmentation, } if is_torch_available() else {} ) A__ : Any = True A__ : List[Any] = False A__ : Any = False A__ : Union[str, Any] = False def _a ( self : str ): """simple docstring""" A__ = SegformerModelTester(self ) A__ = SegformerConfigTester(self , config_class=_snake_case ) def _a ( self : int ): """simple docstring""" self.config_tester.run_common_tests() def _a ( self : Optional[Any] ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def _a ( self : Dict ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_binary_image_segmentation(*_snake_case ) def _a ( self : int ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*_snake_case ) @unittest.skip('SegFormer does not use inputs_embeds' ) def _a ( self : Dict ): """simple docstring""" pass @unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods' ) def _a ( self : Dict ): """simple docstring""" pass def _a ( self : str ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(_snake_case ) A__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ['pixel_values'] self.assertListEqual(arg_names[:1] , _snake_case ) def _a ( self : Union[str, Any] ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = True for model_class in self.all_model_classes: A__ = True A__ = False A__ = True A__ = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): A__ = model(**self._prepare_for_class(_snake_case , _snake_case ) ) A__ = outputs.attentions A__ = sum(self.model_tester.depths ) self.assertEqual(len(_snake_case ) , _snake_case ) # 
check that output_attentions also work using config del inputs_dict["output_attentions"] A__ = True A__ = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): A__ = model(**self._prepare_for_class(_snake_case , _snake_case ) ) A__ = outputs.attentions self.assertEqual(len(_snake_case ) , _snake_case ) # verify the first attentions (first block, first layer) A__ = (self.model_tester.image_size // 4) ** 2 A__ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) # verify the last attentions (last block, last layer) A__ = (self.model_tester.image_size // 32) ** 2 A__ = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2 self.assertListEqual( list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , ) A__ = len(_snake_case ) # Check attention is always last and order is fine A__ = True A__ = True A__ = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): A__ = model(**self._prepare_for_class(_snake_case , _snake_case ) ) self.assertEqual(out_len + 1 , len(_snake_case ) ) A__ = outputs.attentions self.assertEqual(len(_snake_case ) , _snake_case ) # verify the first attentions (first block, first layer) A__ = (self.model_tester.image_size // 4) ** 2 A__ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) def _a ( self : Union[str, Any] ): """simple docstring""" def check_hidden_states_output(_snake_case : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : str ): A__ = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): A__ = model(**self._prepare_for_class(_snake_case , _snake_case ) ) A__ = outputs.hidden_states A__ = self.model_tester.num_encoder_blocks self.assertEqual(len(_snake_case ) , _snake_case ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.hidden_sizes[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = True check_hidden_states_output(_snake_case , _snake_case , _snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A__ = True check_hidden_states_output(_snake_case , _snake_case , _snake_case ) def _a ( self : Tuple ): """simple docstring""" if not self.model_tester.is_training: return A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = True for model_class in self.all_model_classes: if model_class in get_values(_snake_case ): continue A__ = model_class(_snake_case ) model.to(_snake_case ) model.train() A__ = self._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case ) A__ = model(**_snake_case ).loss loss.backward() @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def _a ( self : Optional[Any] ): """simple docstring""" pass @slow def _a ( self : Dict ): """simple docstring""" for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = SegformerModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) def A ( ) -> Optional[Any]: A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch class __lowerCAmelCase ( unittest.TestCase): """simple docstring""" @slow def _a ( self : Dict ): """simple docstring""" A__ = SegformerImageProcessor( image_scale=(5_12, 5_12) , keep_ratio=_snake_case , align=_snake_case , do_random_crop=_snake_case ) A__ = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to( _snake_case ) A__ = prepare_img() A__ = image_processor(images=_snake_case , return_tensors='pt' ) A__ = encoded_inputs.pixel_values.to(_snake_case ) with torch.no_grad(): A__ = model(_snake_case ) A__ = torch.Size((1, model.config.num_labels, 1_28, 1_28) ) self.assertEqual(outputs.logits.shape , _snake_case ) A__ = torch.tensor( [ [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]], [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]], [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]], ] ).to(_snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _snake_case , atol=1E-4 ) ) @slow def _a ( self : List[Any] ): """simple docstring""" A__ = SegformerImageProcessor( image_scale=(5_12, 5_12) , keep_ratio=_snake_case , align=_snake_case , do_random_crop=_snake_case ) A__ = SegformerForSemanticSegmentation.from_pretrained( 'nvidia/segformer-b1-finetuned-cityscapes-1024-1024' ).to(_snake_case ) A__ = prepare_img() A__ = image_processor(images=_snake_case , return_tensors='pt' ) A__ = encoded_inputs.pixel_values.to(_snake_case ) with torch.no_grad(): A__ = model(_snake_case ) A__ = torch.Size((1, model.config.num_labels, 1_28, 1_28) ) self.assertEqual(outputs.logits.shape , _snake_case ) A__ = torch.tensor( [ [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]], [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]], [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]], ] ).to(_snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _snake_case , atol=1E-1 ) ) @slow def _a ( self : List[Any] ): """simple docstring""" A__ = SegformerImageProcessor( image_scale=(5_12, 5_12) , keep_ratio=_snake_case , align=_snake_case , do_random_crop=_snake_case ) A__ = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to( _snake_case ) A__ = prepare_img() A__ = image_processor(images=_snake_case , return_tensors='pt' ) A__ = encoded_inputs.pixel_values.to(_snake_case ) with torch.no_grad(): A__ = model(_snake_case ) A__ = outputs.logits.detach().cpu() A__ = image_processor.post_process_semantic_segmentation(outputs=_snake_case , target_sizes=[(5_00, 3_00)] ) A__ = torch.Size((5_00, 3_00) ) self.assertEqual(segmentation[0].shape , _snake_case ) A__ = image_processor.post_process_semantic_segmentation(outputs=_snake_case ) A__ = torch.Size((1_28, 1_28) ) self.assertEqual(segmentation[0].shape , _snake_case )
709
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
52
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_bridgetower": [
        "BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BridgeTowerConfig",
        "BridgeTowerTextConfig",
        "BridgeTowerVisionConfig",
    ],
    "processing_bridgetower": ["BridgeTowerProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bridgetower"] = [
        "BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BridgeTowerForContrastiveLearning",
        "BridgeTowerForImageAndTextRetrieval",
        "BridgeTowerForMaskedLM",
        "BridgeTowerModel",
        "BridgeTowerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_bridgetower import (
        BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BridgeTowerConfig,
        BridgeTowerTextConfig,
        BridgeTowerVisionConfig,
    )
    from .processing_bridgetower import BridgeTowerProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_bridgetower import BridgeTowerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bridgetower import (
            BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
            BridgeTowerForContrastiveLearning,
            BridgeTowerForImageAndTextRetrieval,
            BridgeTowerForMaskedLM,
            BridgeTowerModel,
            BridgeTowerPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
710
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
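# A short usage sketch with the public transformers API: instantiate the config
# with defaults, then override one field. The values mirror the defaults above.
from transformers import RobertaConfig

config = RobertaConfig()                    # roberta-base style defaults
assert config.hidden_size == 768
small = RobertaConfig(num_hidden_layers=4)  # override for a smaller model
print(small.num_hidden_layers)              # 4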
52
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
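# Sketch of the validation rules above (illustration only, using the LlamaConfig
# defined in this file): a well-formed rope_scaling dict passes, and a malformed
# one raises ValueError.
config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # ok

try:
    LlamaConfig(rope_scaling={"type": "linear"})  # missing the "factor" field
except ValueError as err:
    print(err)  # `rope_scaling` must be a dictionary with two fields...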
711
import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" A__ : int = LongformerTokenizer A__ : Optional[int] = True A__ : Any = LongformerTokenizerFast A__ : Dict = True def _a ( self : int ): """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A__ = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] A__ = dict(zip(_snake_case , range(len(_snake_case ) ) ) ) A__ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] A__ = {'unk_token': '<unk>'} A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(_snake_case ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(_snake_case ) ) def _a ( self : int , **_snake_case : Union[str, Any] ): """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **_snake_case ) def _a ( self : Optional[int] , **_snake_case : List[Any] ): """simple docstring""" kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_snake_case ) def _a ( self : Any , _snake_case : Optional[Any] ): """simple docstring""" A__ = 'lower newer' A__ = 'lower newer' return input_text, output_text def _a ( self : Any ): """simple docstring""" A__ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) A__ = 'lower newer' A__ = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er'] A__ = tokenizer.tokenize(_snake_case ) # , add_prefix_space=True) self.assertListEqual(_snake_case , _snake_case ) A__ = tokens + [tokenizer.unk_token] A__ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , _snake_case ) def _a ( self : List[str] ): """simple docstring""" A__ = self.get_tokenizer() self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=_snake_case ) , [0, 3_14_14, 2_32, 3_28, 2] ) self.assertListEqual( tokenizer.encode('Hello world! 
cécé herlolip 418' , add_special_tokens=_snake_case ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , ) @slow def _a ( self : List[Any] ): """simple docstring""" A__ = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' ) A__ = tokenizer.encode('sequence builders' , add_special_tokens=_snake_case ) A__ = tokenizer.encode('multi-sequence build' , add_special_tokens=_snake_case ) A__ = tokenizer.encode( 'sequence builders' , add_special_tokens=_snake_case , add_prefix_space=_snake_case ) A__ = tokenizer.encode( 'sequence builders' , 'multi-sequence build' , add_special_tokens=_snake_case , add_prefix_space=_snake_case ) A__ = tokenizer.build_inputs_with_special_tokens(_snake_case ) A__ = tokenizer.build_inputs_with_special_tokens(_snake_case , _snake_case ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def _a ( self : List[str] ): """simple docstring""" A__ = self.get_tokenizer() A__ = 'Encode this sequence.' A__ = tokenizer.byte_encoder[' '.encode('utf-8' )[0]] # Testing encoder arguments A__ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case , add_prefix_space=_snake_case ) A__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(_snake_case , _snake_case ) A__ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case , add_prefix_space=_snake_case ) A__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(_snake_case , _snake_case ) tokenizer.add_special_tokens({'bos_token': '<s>'} ) A__ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case ) A__ = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(_snake_case , _snake_case ) # Testing spaces after special tokens A__ = '<mask>' tokenizer.add_special_tokens( {'mask_token': AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case )} ) # mask token has a left space A__ = tokenizer.convert_tokens_to_ids(_snake_case ) A__ = 'Encode <mask> sequence' A__ = 'Encode <mask>sequence' A__ = tokenizer.encode(_snake_case ) A__ = encoded.index(_snake_case ) A__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(_snake_case , _snake_case ) A__ = tokenizer.encode(_snake_case ) A__ = encoded.index(_snake_case ) A__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(_snake_case , _snake_case ) def _a ( self : Dict ): """simple docstring""" pass def _a ( self : Union[str, Any] ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): A__ = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case ) A__ = self.tokenizer_class.from_pretrained(_snake_case , **_snake_case ) A__ = 'A, <mask> AllenNLP sentence.' 
A__ = tokenizer_r.encode_plus(_snake_case , add_special_tokens=_snake_case , return_token_type_ids=_snake_case ) A__ = tokenizer_p.encode_plus(_snake_case , add_special_tokens=_snake_case , return_token_type_ids=_snake_case ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , ) A__ = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] ) A__ = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( _snake_case , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) self.assertSequenceEqual( _snake_case , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) def _a ( self : List[Any] ): """simple docstring""" for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): A__ = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) A__ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['add_prefix_space'] , _snake_case ) self.assertEqual(post_processor_state['add_prefix_space'] , _snake_case ) self.assertEqual(post_processor_state['trim_offsets'] , _snake_case ) def _a ( self : Optional[Any] ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): A__ = 'hello' # `hello` is a token in the vocabulary of `pretrained_name` A__ = F'''{text_of_1_token} {text_of_1_token}''' A__ = self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_snake_case ) + 1, len(_snake_case ) + 1 + len(_snake_case )) , ) A__ = self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_snake_case ) + 1, len(_snake_case ) + 1 + len(_snake_case )) , ) A__ = self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_snake_case ), len(_snake_case ) + 1 + len(_snake_case )) , ) A__ = 
self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_snake_case ), len(_snake_case ) + 1 + len(_snake_case )) , ) A__ = F''' {text}''' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) A__ = self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_snake_case ) + 1, 1 + len(_snake_case ) + 1 + len(_snake_case )) , ) A__ = self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_snake_case ), 1 + len(_snake_case ) + 1 + len(_snake_case )) , ) A__ = self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_snake_case ), 1 + len(_snake_case ) + 1 + len(_snake_case )) , )
52
0
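As a hedged illustration of the offset-mapping behaviour the tests above walk through, the sketch below uses the public roberta-base checkpoint (an assumption; any fast BPE tokenizer with add_prefix_space / trim_offsets behaves analogously):

from transformers import RobertaTokenizerFast

# Minimal sketch, assuming the "roberta-base" checkpoint is available.
tok = RobertaTokenizerFast.from_pretrained("roberta-base", add_prefix_space=True, trim_offsets=True)
enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
print(enc.tokens())        # e.g. ['Ġhello', 'Ġhello']
print(enc.offset_mapping)  # e.g. [(0, 5), (6, 11)] -- leading spaces trimmed from the offsets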
'''simple docstring''' import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def A ( __UpperCamelCase , __UpperCamelCase ) -> Tuple: A__ = args.log_outputs A__ = '_'.join(args.dataset.split('/' ) + [args.config, args.split] ) # load metric A__ = load_metric('wer' ) A__ = load_metric('cer' ) # compute metrics A__ = wer.compute(references=result['target'] , predictions=result['prediction'] ) A__ = cer.compute(references=result['target'] , predictions=result['prediction'] ) # print & log results A__ = f'''WER: {wer_result}\nCER: {cer_result}''' print(__UpperCamelCase ) with open(f'''{dataset_id}_eval_results.txt''' , 'w' ) as f: f.write(__UpperCamelCase ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: A__ = f'''log_{dataset_id}_predictions.txt''' A__ = f'''log_{dataset_id}_targets.txt''' with open(__UpperCamelCase , 'w' ) as p, open(__UpperCamelCase , 'w' ) as t: # mapping function to write output def write_to_file(__UpperCamelCase , __UpperCamelCase ): p.write(f'''{i}''' + '\n' ) p.write(batch['prediction'] + '\n' ) t.write(f'''{i}''' + '\n' ) t.write(batch['target'] + '\n' ) result.map(__UpperCamelCase , with_indices=__UpperCamelCase ) def A ( __UpperCamelCase ) -> str: A__ = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training A__ = re.sub(__UpperCamelCase , '' , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! A__ = ['\n\n', '\n', ' ', ' '] for t in token_sequences_to_ignore: A__ = ' '.join(text.split(__UpperCamelCase ) ) return text def A ( __UpperCamelCase ) -> Union[str, Any]: # load dataset A__ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=__UpperCamelCase ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor A__ = AutoFeatureExtractor.from_pretrained(args.model_id ) A__ = feature_extractor.sampling_rate # resample audio A__ = dataset.cast_column('audio' , Audio(sampling_rate=__UpperCamelCase ) ) # load eval pipeline if args.device is None: A__ = 0 if torch.cuda.is_available() else -1 A__ = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(__UpperCamelCase ): A__ = asr( batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) A__ = prediction['text'] A__ = normalize_text(batch['sentence'] ) return batch # run inference on all examples A__ = dataset.map(__UpperCamelCase , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(__UpperCamelCase , __UpperCamelCase ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() parser.add_argument( '''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers''' ) parser.add_argument( '''--dataset''', type=str, required=True, help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''', ) parser.add_argument( '''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice''' ) parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. 
*E.g.* `\'test\'`''') parser.add_argument( '''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.''' ) parser.add_argument( '''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.''' ) parser.add_argument( '''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.''' ) parser.add_argument( '''--device''', type=int, default=None, help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''', ) SCREAMING_SNAKE_CASE__ = parser.parse_args() main(args)
712
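A hedged sketch of the metric step in the script above; load_metric ships with older `datasets` releases (newer code would typically use the `evaluate` library instead):

from datasets import load_metric

wer_metric = load_metric("wer")
score = wer_metric.compute(
    predictions=["the cat sat on the mat"],
    references=["the cat sat on a mat"],
)
print(score)  # one substitution over six words -> about 0.1667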
import pytest import datasets # Import fixture modules as plugins SCREAMING_SNAKE_CASE__ = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec'''] def A ( __UpperCamelCase , __UpperCamelCase ) -> Optional[int]: # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit") for item in items: if any(marker in item.keywords for marker in ['integration', 'unit'] ): continue item.add_marker(pytest.mark.unit ) def A ( __UpperCamelCase ) -> str: config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' ) @pytest.fixture(autouse=__UpperCamelCase ) def A ( __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]: # test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work? A__ = tmp_path_factory.getbasetemp() / 'cache' A__ = test_hf_cache_home / 'datasets' A__ = test_hf_cache_home / 'metrics' A__ = test_hf_cache_home / 'modules' monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(__UpperCamelCase ) ) monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(__UpperCamelCase ) ) monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(__UpperCamelCase ) ) A__ = test_hf_datasets_cache / 'downloads' monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(__UpperCamelCase ) ) A__ = test_hf_datasets_cache / 'downloads' / 'extracted' monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(__UpperCamelCase ) ) @pytest.fixture(autouse=__UpperCamelCase , scope='session' ) def A ( ) -> Union[str, Any]: datasets.disable_progress_bar() @pytest.fixture(autouse=__UpperCamelCase ) def A ( __UpperCamelCase ) -> int: # don't take tests into account when counting downloads monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , __UpperCamelCase ) @pytest.fixture def A ( __UpperCamelCase ) -> Any: # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0 # To be removed once SQLAlchemy 2.0 supported monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , __UpperCamelCase )
52
0
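The cache-isolation idea in the conftest above reduces to a minimal autouse fixture; a sketch that redirects the datasets cache per test (tmp_path is pytest's built-in temporary directory):

import pytest

@pytest.fixture(autouse=True)
def isolated_datasets_cache(monkeypatch, tmp_path):
    # Point the datasets cache at a per-test directory so tests cannot
    # pollute (or depend on) the user's real cache.
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(tmp_path / "datasets"))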
from typing import Dict import numpy as np from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException if is_tf_available(): import tensorflow as tf from ..tf_utils import stable_softmax if is_torch_available(): import torch SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) @add_end_docstrings( UpperCAmelCase_ , R"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , ) class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def _a ( self : Union[str, Any] , _snake_case : GenericTensor ): """simple docstring""" if self.framework == "tf": A__ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy() elif self.framework == "pt": A__ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_snake_case ) else: raise ValueError('Unsupported framework' ) return masked_index def _a ( self : List[Any] , _snake_case : GenericTensor ): """simple docstring""" A__ = self.get_masked_index(_snake_case ) A__ = np.prod(masked_index.shape ) if numel < 1: raise PipelineException( 'fill-mask' , self.model.base_model_prefix , F'''No mask_token ({self.tokenizer.mask_token}) found on the input''' , ) def _a ( self : Any , _snake_case : GenericTensor ): """simple docstring""" if isinstance(_snake_case , _snake_case ): for model_input in model_inputs: self._ensure_exactly_one_mask_token(model_input['input_ids'][0] ) else: for input_ids in model_inputs["input_ids"]: self._ensure_exactly_one_mask_token(_snake_case ) def _a ( self : str , _snake_case : Tuple , _snake_case : Union[str, Any]=None , **_snake_case : Dict ): """simple docstring""" if return_tensors is None: A__ = self.framework A__ = self.tokenizer(_snake_case , return_tensors=_snake_case ) self.ensure_exactly_one_mask_token(_snake_case ) return model_inputs def _a ( self : List[Any] , _snake_case : Union[str, Any] ): """simple docstring""" A__ = self.model(**_snake_case ) A__ = model_inputs['input_ids'] return model_outputs def _a ( self : int , _snake_case : Any , _snake_case : Dict=5 , _snake_case : List[str]=None ): """simple docstring""" if target_ids is not None and target_ids.shape[0] < top_k: A__ = target_ids.shape[0] A__ = model_outputs['input_ids'][0] A__ = model_outputs['logits'] if self.framework == "tf": A__ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0] A__ = outputs.numpy() A__ = outputs[0, masked_index, :] A__ = stable_softmax(_snake_case , axis=-1 ) if target_ids is not None: A__ = tf.gather_nd(tf.squeeze(_snake_case , 0 ) , target_ids.reshape(-1 , 1 ) ) A__ = tf.expand_dims(_snake_case , 0 ) A__ = tf.math.top_k(_snake_case , k=_snake_case ) A__ , A__ = topk.values.numpy(), topk.indices.numpy() else: A__ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_snake_case ).squeeze(-1 ) # Fill mask pipeline supports only one ${mask_token} per sample A__ = outputs[0, masked_index, :] A__ = logits.softmax(dim=-1 ) if target_ids is not None: A__ = probs[..., target_ids] A__ , A__ = probs.topk(_snake_case ) A__ = [] A__ = values.shape[0] == 1 for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) 
): A__ = [] for v, p in zip(_values , _predictions ): # Copy is important since we're going to modify this array in place A__ = input_ids.numpy().copy() if target_ids is not None: A__ = target_ids[p].tolist() A__ = p # Filter padding out: A__ = tokens[np.where(tokens != self.tokenizer.pad_token_id )] # Originally we skip special tokens to give readable output. # For multi masks though, the other [MASK] would be removed otherwise # making the output look odd, so we add them back A__ = self.tokenizer.decode(_snake_case , skip_special_tokens=_snake_case ) A__ = {'score': v, 'token': p, 'token_str': self.tokenizer.decode([p] ), 'sequence': sequence} row.append(_snake_case ) result.append(_snake_case ) if single_mask: return result[0] return result def _a ( self : List[str] , _snake_case : List[str] , _snake_case : List[Any]=None ): """simple docstring""" if isinstance(_snake_case , _snake_case ): A__ = [targets] try: A__ = self.tokenizer.get_vocab() except Exception: A__ = {} A__ = [] for target in targets: A__ = vocab.get(_snake_case , _snake_case ) if id_ is None: A__ = self.tokenizer( _snake_case , add_special_tokens=_snake_case , return_attention_mask=_snake_case , return_token_type_ids=_snake_case , max_length=1 , truncation=_snake_case , )['input_ids'] if len(_snake_case ) == 0: logger.warning( F'''The specified target token `{target}` does not exist in the model vocabulary. ''' 'We cannot replace it with anything meaningful, ignoring it' ) continue A__ = input_ids[0] # XXX: If users encounter this pass # it becomes pretty slow, so let's make sure # The warning enables them to fix the input to # get faster performance. logger.warning( F'''The specified target token `{target}` does not exist in the model vocabulary. ''' F'''Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.''' ) target_ids.append(id_ ) A__ = list(set(_snake_case ) ) if len(_snake_case ) == 0: raise ValueError('At least one target must be provided when passed.' ) A__ = np.array(_snake_case ) return target_ids def _a ( self : List[Any] , _snake_case : List[Any]=None , _snake_case : Any=None ): """simple docstring""" A__ = {} if targets is not None: A__ = self.get_target_ids(_snake_case , _snake_case ) A__ = target_ids if top_k is not None: A__ = top_k if self.tokenizer.mask_token_id is None: raise PipelineException( 'fill-mask' , self.model.base_model_prefix , 'The tokenizer does not define a `mask_token`.' ) return {}, {}, postprocess_params def __call__( self : Any , _snake_case : Optional[int] , *_snake_case : List[Any] , **_snake_case : Any ): """simple docstring""" A__ = super().__call__(_snake_case , **_snake_case ) if isinstance(_snake_case , _snake_case ) and len(_snake_case ) == 1: return outputs[0] return outputs
713
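A hedged usage sketch for the pipeline class above; distilroberta-base is an assumed checkpoint, and top_k / targets map onto the preprocess and postprocess parameters handled above:

from transformers import pipeline

unmasker = pipeline("fill-mask", model="distilroberta-base")  # assumed checkpoint
print(unmasker("Paris is the <mask> of France.", top_k=3))
# Restricting scoring to given targets; a target missing from the vocab is
# tokenized and its first sub-token used, with a warning, per the target-id lookup above.
print(unmasker("Paris is the <mask> of France.", targets=["capital"]))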
import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def A ( __UpperCamelCase , __UpperCamelCase ) -> Tuple: A__ = args.log_outputs A__ = '_'.join(args.dataset.split('/' ) + [args.config, args.split] ) # load metric A__ = load_metric('wer' ) A__ = load_metric('cer' ) # compute metrics A__ = wer.compute(references=result['target'] , predictions=result['prediction'] ) A__ = cer.compute(references=result['target'] , predictions=result['prediction'] ) # print & log results A__ = f'''WER: {wer_result}\nCER: {cer_result}''' print(__UpperCamelCase ) with open(f'''{dataset_id}_eval_results.txt''' , 'w' ) as f: f.write(__UpperCamelCase ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: A__ = f'''log_{dataset_id}_predictions.txt''' A__ = f'''log_{dataset_id}_targets.txt''' with open(__UpperCamelCase , 'w' ) as p, open(__UpperCamelCase , 'w' ) as t: # mapping function to write output def write_to_file(__UpperCamelCase , __UpperCamelCase ): p.write(f'''{i}''' + '\n' ) p.write(batch['prediction'] + '\n' ) t.write(f'''{i}''' + '\n' ) t.write(batch['target'] + '\n' ) result.map(__UpperCamelCase , with_indices=__UpperCamelCase ) def A ( __UpperCamelCase ) -> str: A__ = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training A__ = re.sub(__UpperCamelCase , '' , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! A__ = ['\n\n', '\n', ' ', ' '] for t in token_sequences_to_ignore: A__ = ' '.join(text.split(__UpperCamelCase ) ) return text def A ( __UpperCamelCase ) -> Union[str, Any]: # load dataset A__ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=__UpperCamelCase ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor A__ = AutoFeatureExtractor.from_pretrained(args.model_id ) A__ = feature_extractor.sampling_rate # resample audio A__ = dataset.cast_column('audio' , Audio(sampling_rate=__UpperCamelCase ) ) # load eval pipeline if args.device is None: A__ = 0 if torch.cuda.is_available() else -1 A__ = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(__UpperCamelCase ): A__ = asr( batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) A__ = prediction['text'] A__ = normalize_text(batch['sentence'] ) return batch # run inference on all examples A__ = dataset.map(__UpperCamelCase , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(__UpperCamelCase , __UpperCamelCase ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() parser.add_argument( '''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers''' ) parser.add_argument( '''--dataset''', type=str, required=True, help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''', ) parser.add_argument( '''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice''' ) parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. 
*E.g.* `\'test\'`''') parser.add_argument( '''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.''' ) parser.add_argument( '''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.''' ) parser.add_argument( '''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.''' ) parser.add_argument( '''--device''', type=int, default=None, help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''', ) SCREAMING_SNAKE_CASE__ = parser.parse_args() main(args)
52
0
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count = defaultdict(int)

    # For each character in the input strings,
    # increment count for the corresponding character
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
714
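An equivalent, self-contained check using collections.Counter instead of the manual defaultdict bookkeeping above; two strings are anagrams exactly when their character counts match:

from collections import Counter

def check_anagrams_counter(first: str, second: str) -> bool:
    first = first.lower().replace(" ", "")
    second = second.lower().replace(" ", "")
    return Counter(first) == Counter(second)

assert check_anagrams_counter("Silent", "Listen")
assert not check_anagrams_counter("there", "their")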
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) def A ( __UpperCamelCase ) -> YolosConfig: A__ = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: A__ = 192 A__ = 768 A__ = 12 A__ = 3 A__ = [800, 1_333] A__ = False elif yolos_name == "yolos_s_dWr": A__ = 330 A__ = 14 A__ = 6 A__ = 1_320 elif "yolos_s" in yolos_name: A__ = 384 A__ = 1_536 A__ = 12 A__ = 6 elif "yolos_b" in yolos_name: A__ = [800, 1_344] A__ = 91 A__ = 'huggingface/label-files' A__ = 'coco-detection-id2label.json' A__ = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='dataset' ) , 'r' ) ) A__ = {int(__UpperCamelCase ): v for k, v in idalabel.items()} A__ = idalabel A__ = {v: k for k, v in idalabel.items()} return config def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False ) -> str: for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) A__ = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) A__ = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict A__ = in_proj_weight[: config.hidden_size, :] A__ = in_proj_bias[: config.hidden_size] A__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] A__ = in_proj_weight[-config.hidden_size :, :] A__ = in_proj_bias[-config.hidden_size :] def A ( __UpperCamelCase ) -> str: if "backbone" in name: A__ = name.replace('backbone' , 'vit' ) if "cls_token" in name: A__ = name.replace('cls_token' , 'embeddings.cls_token' ) if "det_token" in name: A__ = name.replace('det_token' , 'embeddings.detection_tokens' ) if "mid_pos_embed" in name: A__ = name.replace('mid_pos_embed' , 'encoder.mid_position_embeddings' ) if "pos_embed" in name: A__ = name.replace('pos_embed' , 'embeddings.position_embeddings' ) if "patch_embed.proj" in name: A__ = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "blocks" in name: A__ = name.replace('blocks' , 'encoder.layer' ) if "attn.proj" in name: A__ = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: A__ = name.replace('attn' , 'attention.self' ) if "norm1" in name: A__ = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: A__ = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: A__ = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: A__ = name.replace('mlp.fc2' , 'output.dense' ) if "class_embed" in name: A__ = name.replace('class_embed' , 'class_labels_classifier' ) if "bbox_embed" in name: A__ = name.replace('bbox_embed' , 'bbox_predictor' ) if "vit.norm" in name: A__ = name.replace('vit.norm' , 'vit.layernorm' ) return name def A ( __UpperCamelCase , __UpperCamelCase ) -> dict: for key in orig_state_dict.copy().keys(): A__ = orig_state_dict.pop(__UpperCamelCase ) if "qkv" in key: A__ = key.split('.' 
) A__ = int(key_split[2] ) A__ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: A__ = val[:dim, :] A__ = val[ dim : dim * 2, : ] A__ = val[-dim:, :] else: A__ = val[:dim] A__ = val[dim : dim * 2] A__ = val[-dim:] else: A__ = val return orig_state_dict def A ( ) -> torch.Tensor: A__ = 'http://images.cocodataset.org/val2017/000000039769.jpg' A__ = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw ) return im @torch.no_grad() def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False ) -> List[str]: A__ = get_yolos_config(__UpperCamelCase ) # load original state_dict A__ = torch.load(__UpperCamelCase , map_location='cpu' )['model'] # load 🤗 model A__ = YolosForObjectDetection(__UpperCamelCase ) model.eval() A__ = convert_state_dict(__UpperCamelCase , __UpperCamelCase ) model.load_state_dict(__UpperCamelCase ) # Check outputs on an image, prepared by YolosImageProcessor A__ = 800 if yolos_name != 'yolos_ti' else 512 A__ = YolosImageProcessor(format='coco_detection' , size=__UpperCamelCase ) A__ = image_processor(images=prepare_img() , return_tensors='pt' ) A__ = model(**__UpperCamelCase ) A__ , A__ = outputs.logits, outputs.pred_boxes A__ , A__ = None, None if yolos_name == "yolos_ti": A__ = torch.tensor( [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] ) A__ = torch.tensor( [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] ) elif yolos_name == "yolos_s_200_pre": A__ = torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] ) A__ = torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] ) elif yolos_name == "yolos_s_300_pre": A__ = torch.tensor( [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] ) A__ = torch.tensor( [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] ) elif yolos_name == "yolos_s_dWr": A__ = torch.tensor( [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] ) A__ = torch.tensor( [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] ) elif yolos_name == "yolos_base": A__ = torch.tensor( [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] ) A__ = torch.tensor( [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] ) else: raise ValueError(f'''Unknown yolos_name: {yolos_name}''' ) assert torch.allclose(logits[0, :3, :3] , __UpperCamelCase , atol=1E-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , __UpperCamelCase , atol=1E-4 ) Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase ) print(f'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__UpperCamelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__UpperCamelCase ) if push_to_hub: A__ = { 'yolos_ti': 'yolos-tiny', 'yolos_s_200_pre': 'yolos-small', 'yolos_s_300_pre': 'yolos-small-300', 'yolos_s_dWr': 'yolos-small-dwr', 'yolos_base': 'yolos-base', } print('Pushing to the hub...' 
) A__ = model_mapping[yolos_name] image_processor.push_to_hub(__UpperCamelCase , organization='hustvl' ) model.push_to_hub(__UpperCamelCase , organization='hustvl' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--yolos_name''', default='''yolos_s_200_pre''', type=str, help=( '''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\',''' ''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.''' ), ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) SCREAMING_SNAKE_CASE__ = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
52
0
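The fused-QKV split in the conversion above can be shown in isolation; a standalone sketch with a toy hidden size (timm stores one (3*dim, dim) projection, while the HF model expects separate q/k/v matrices):

import torch

dim = 8
qkv_weight = torch.randn(3 * dim, dim)  # fused [q; k; v] as in the timm checkpoint
q_w = qkv_weight[:dim, :]
k_w = qkv_weight[dim : dim * 2, :]
v_w = qkv_weight[-dim:, :]
assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), qkv_weight)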
from pathlib import Path import fire from tqdm import tqdm def A ( __UpperCamelCase="ro" , __UpperCamelCase="en" , __UpperCamelCase="wmt16" , __UpperCamelCase=None ) -> None: try: import datasets except (ModuleNotFoundError, ImportError): raise ImportError('run pip install datasets' ) A__ = f'''{src_lang}-{tgt_lang}''' print(f'''Converting {dataset}-{pair}''' ) A__ = datasets.load_dataset(__UpperCamelCase , __UpperCamelCase ) if save_dir is None: A__ = f'''{dataset}-{pair}''' A__ = Path(__UpperCamelCase ) save_dir.mkdir(exist_ok=__UpperCamelCase ) for split in ds.keys(): print(f'''Splitting {split} with {ds[split].num_rows} records''' ) # to save to val.source, val.target like summary datasets A__ = 'val' if split == 'validation' else split A__ = save_dir.joinpath(f'''{fn}.source''' ) A__ = save_dir.joinpath(f'''{fn}.target''' ) A__ = src_path.open('w+' ) A__ = tgt_path.open('w+' ) # reader is the bottleneck so writing one record at a time doesn't slow things down for x in tqdm(ds[split] ): A__ = x['translation'] src_fp.write(ex[src_lang] + '\n' ) tgt_fp.write(ex[tgt_lang] + '\n' ) print(f'''Saved {dataset} dataset to {save_dir}''' ) if __name__ == "__main__": fire.Fire(download_wmt_dataset)
715
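Since fire.Fire exposes the function as a CLI under the name download_wmt_dataset, it can also be called directly; a hedged usage sketch (argument values are just the script's defaults):

download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir="wmt16-ro-en")
# Expected layout afterwards: train.source/train.target, val.source/val.target,
# test.source/test.target under wmt16-ro-en/.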
from typing import TYPE_CHECKING from ..utils import _LazyModule SCREAMING_SNAKE_CASE__ = { '''config''': [ '''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''', '''OnnxConfig''', '''OnnxConfigWithPast''', '''OnnxSeq2SeqConfigWithPast''', '''PatchingSpec''', ], '''convert''': ['''export''', '''validate_model_outputs'''], '''features''': ['''FeaturesManager'''], '''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''], } if TYPE_CHECKING: from .config import ( EXTERNAL_DATA_FORMAT_SIZE_LIMIT, OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast, PatchingSpec, ) from .convert import export, validate_model_outputs from .features import FeaturesManager from .utils import ParameterFormat, compute_serialized_parameters_size else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
52
0
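The _LazyModule pattern used above can be approximated with module-level __getattr__ (PEP 562); a minimal sketch for a package __init__.py (the real _LazyModule also handles dir(), submodule access, and import errors):

import importlib

_import_structure = {"config": ["OnnxConfig"]}

def __getattr__(name):
    for submodule, names in _import_structure.items():
        if name in names:
            # Import the submodule only on first attribute access.
            return getattr(importlib.import_module(f".{submodule}", __name__), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")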
from __future__ import annotations import os from collections.abc import Mapping SCREAMING_SNAKE_CASE__ = tuple[int, int] class __lowerCAmelCase : """simple docstring""" def __init__( self : Tuple , _snake_case : set[int] , _snake_case : Mapping[EdgeT, int] ): """simple docstring""" A__ = vertices A__ = { (min(_snake_case ), max(_snake_case )): weight for edge, weight in edges.items() } def _a ( self : Optional[Any] , _snake_case : EdgeT , _snake_case : int ): """simple docstring""" self.vertices.add(edge[0] ) self.vertices.add(edge[1] ) A__ = weight def _a ( self : List[str] ): """simple docstring""" A__ = Graph({min(self.vertices )} , {} ) A__ = 42 A__ = 42 A__ = 42 A__ = 42 while len(subgraph.vertices ) < len(self.vertices ): A__ = max(self.edges.values() ) + 1 for edge, weight in self.edges.items(): if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): if weight < min_weight: A__ = edge A__ = weight subgraph.add_edge(_snake_case , _snake_case ) return subgraph def A ( __UpperCamelCase = "p107_network.txt" ) -> int: A__ = os.path.abspath(os.path.dirname(__UpperCamelCase ) ) A__ = os.path.join(__UpperCamelCase , __UpperCamelCase ) A__ = {} A__ = 42 A__ = 42 A__ = 42 with open(__UpperCamelCase ) as f: A__ = f.read().strip().split('\n' ) A__ = [line.split(',' ) for line in data] for edgea in range(1 , len(__UpperCamelCase ) ): for edgea in range(__UpperCamelCase ): if adjaceny_matrix[edgea][edgea] != "-": A__ = int(adjaceny_matrix[edgea][edgea] ) A__ = Graph(set(range(len(__UpperCamelCase ) ) ) , __UpperCamelCase ) A__ = graph.prims_algorithm() A__ = sum(graph.edges.values() ) A__ = sum(subgraph.edges.values() ) return initial_total - optimal_total if __name__ == "__main__": print(f'{solution() = }')
716
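A small worked example for the graph code above, using the names its own call sites use (Graph, prims_algorithm); on a triangle the minimum spanning tree keeps edges (0,1) and (1,2):

g = Graph({0, 1, 2}, {(0, 1): 3, (1, 2): 1, (0, 2): 5})
mst = g.prims_algorithm()
assert sum(mst.edges.values()) == 4                          # 3 + 1
assert sum(g.edges.values()) - sum(mst.edges.values()) == 5  # the saving reported by solution()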
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''} SCREAMING_SNAKE_CASE__ = { '''vocab_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''', }, '''tokenizer_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''', }, } SCREAMING_SNAKE_CASE__ = { '''google/rembert''': 2_5_6, } SCREAMING_SNAKE_CASE__ = '''▁''' class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" A__ : Any = VOCAB_FILES_NAMES A__ : str = PRETRAINED_VOCAB_FILES_MAP A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ : int = RemBertTokenizer def __init__( self : Union[str, Any] , _snake_case : Any=None , _snake_case : Optional[Any]=None , _snake_case : Any=True , _snake_case : Optional[int]=True , _snake_case : Dict=False , _snake_case : Dict="[CLS]" , _snake_case : List[Any]="[SEP]" , _snake_case : Union[str, Any]="<unk>" , _snake_case : List[str]="[SEP]" , _snake_case : List[str]="<pad>" , _snake_case : str="[CLS]" , _snake_case : Any="[MASK]" , **_snake_case : Any , ): """simple docstring""" A__ = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else mask_token super().__init__( _snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , remove_space=_snake_case , keep_accents=_snake_case , bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , **_snake_case , ) A__ = do_lower_case A__ = remove_space A__ = keep_accents A__ = vocab_file A__ = False if not self.vocab_file else True def _a ( self : Any , _snake_case : List[int] , _snake_case : Optional[List[int]] = None ): """simple docstring""" A__ = [self.sep_token_id] A__ = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _a ( self : Tuple , _snake_case : List[int] , _snake_case : Optional[List[int]] = None , _snake_case : bool = False ): """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.' 
) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(_snake_case )) + [1] + ([0] * len(_snake_case )) + [1] return [1] + ([0] * len(_snake_case )) + [1] def _a ( self : Dict , _snake_case : List[int] , _snake_case : Optional[List[int]] = None ): """simple docstring""" A__ = [self.sep_token_id] A__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _a ( self : Any , _snake_case : str , _snake_case : Optional[str] = None ): """simple docstring""" if not os.path.isdir(_snake_case ): logger.error('Vocabulary path ({}) should be a directory'.format(_snake_case ) ) return A__ = os.path.join( _snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ): copyfile(self.vocab_file , _snake_case ) return (out_vocab_file,)
52
0
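The special-token layout built by the tokenizer above reduces to a simple rule, sketched standalone (the [CLS]/[SEP] ids here are placeholders, not RemBERT's real ids):

def with_special_tokens(ids_a, ids_b=None, cls_id=0, sep_id=1):
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]                      # [CLS] A [SEP]
    return [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]      # [CLS] A [SEP] B [SEP]

assert with_special_tokens([7, 8]) == [0, 7, 8, 1]
assert with_special_tokens([7], [9]) == [0, 7, 1, 9, 1]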
def or_gate(input_a: int, input_b: int) -> int:
    return int((input_a, input_b).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
717
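The OR gate above composes directly into other gates; for instance a NOR gate is just its negation:

def nor_gate(input_a: int, input_b: int) -> int:
    return int(not or_gate(input_a, input_b))

assert nor_gate(0, 0) == 1
assert nor_gate(1, 0) == 0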
import tempfile import unittest from make_student import create_student_by_copying_alternating_layers from transformers import AutoConfig from transformers.file_utils import cached_property from transformers.testing_utils import require_torch SCREAMING_SNAKE_CASE__ = '''sshleifer/bart-tiny-random''' SCREAMING_SNAKE_CASE__ = '''patrickvonplaten/t5-tiny-random''' @require_torch class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def _a ( self : Optional[int] ): """simple docstring""" return AutoConfig.from_pretrained(_snake_case ) def _a ( self : Optional[Any] ): """simple docstring""" A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.num_hidden_layers , 1 ) def _a ( self : Optional[int] ): """simple docstring""" A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=_snake_case ) def _a ( self : int ): """simple docstring""" A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=_snake_case ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers ) def _a ( self : str ): """simple docstring""" A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , 1 ) def _a ( self : str ): """simple docstring""" with self.assertRaises(_snake_case ): create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=_snake_case , d=_snake_case )
52
0
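A hedged sketch of the layer-selection idea the test above exercises: a student built by keeping every k-th teacher layer (this index rule is an illustration, not make_student's exact selection logic):

def pick_alternating_layers(n_teacher_layers: int, n_student_layers: int) -> list:
    step = max(n_teacher_layers // n_student_layers, 1)
    return list(range(0, n_teacher_layers, step))[:n_student_layers]

assert pick_alternating_layers(12, 6) == [0, 2, 4, 6, 8, 10]
assert pick_alternating_layers(12, 1) == [0]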
from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class __lowerCAmelCase : """simple docstring""" A__ : Dict = PegasusConfig A__ : List[Any] = {} A__ : int = "gelu" def __init__( self : Dict , _snake_case : Optional[Any] , _snake_case : Dict=13 , _snake_case : Union[str, Any]=7 , _snake_case : List[str]=True , _snake_case : List[str]=False , _snake_case : Any=99 , _snake_case : List[Any]=32 , _snake_case : List[Any]=2 , _snake_case : Tuple=4 , _snake_case : Union[str, Any]=37 , _snake_case : Optional[Any]=0.1 , _snake_case : Tuple=0.1 , _snake_case : Optional[int]=40 , _snake_case : Optional[Any]=2 , _snake_case : Tuple=1 , _snake_case : int=0 , ): """simple docstring""" A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_labels A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = eos_token_id A__ = pad_token_id A__ = bos_token_id def _a ( self : Optional[Any] ): """simple docstring""" A__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) A__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) A__ = tf.concat([input_ids, eos_tensor] , axis=1 ) A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) A__ = prepare_pegasus_inputs_dict(_snake_case , _snake_case , _snake_case ) return config, inputs_dict def _a ( self : Optional[Any] , _snake_case : Union[str, Any] , _snake_case : List[str] ): """simple docstring""" A__ = TFPegasusModel(config=_snake_case ).get_decoder() A__ = inputs_dict['input_ids'] A__ = input_ids[:1, :] A__ = inputs_dict['attention_mask'][:1, :] A__ = inputs_dict['head_mask'] A__ = 1 # first forward pass A__ = model(_snake_case , attention_mask=_snake_case , head_mask=_snake_case , use_cache=_snake_case ) A__ , A__ = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids A__ = ids_tensor((self.batch_size, 3) , config.vocab_size ) A__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and A__ = tf.concat([input_ids, next_tokens] , axis=-1 ) A__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) A__ = model(_snake_case , attention_mask=_snake_case )[0] A__ = model(_snake_case , 
attention_mask=_snake_case , past_key_values=_snake_case )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice A__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) A__ = output_from_no_past[:, -3:, random_slice_idx] A__ = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(_snake_case , _snake_case , rtol=1E-3 ) def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , ) -> Tuple: if attention_mask is None: A__ = tf.cast(tf.math.not_equal(__UpperCamelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: A__ = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: A__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: A__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: A__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" A__ : Union[str, Any] = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () A__ : Any = (TFPegasusForConditionalGeneration,) if is_tf_available() else () A__ : Optional[int] = ( { "conversational": TFPegasusForConditionalGeneration, "feature-extraction": TFPegasusModel, "summarization": TFPegasusForConditionalGeneration, "text2text-generation": TFPegasusForConditionalGeneration, "translation": TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) A__ : Optional[Any] = True A__ : Optional[Any] = False A__ : Union[str, Any] = False def _a ( self : str ): """simple docstring""" A__ = TFPegasusModelTester(self ) A__ = ConfigTester(self , config_class=_snake_case ) def _a ( self : int ): """simple docstring""" self.config_tester.run_common_tests() def _a ( self : Optional[int] ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_snake_case ) @require_sentencepiece @require_tokenizers @require_tf class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" A__ : Optional[int] = [ " PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.", " The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. 
And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ", ] A__ : Union[str, Any] = [ "California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to" " reduce the risk of wildfires.", "N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.", ] # differs slightly from pytorch, likely due to numerical differences in linear layers A__ : Optional[int] = "google/pegasus-xsum" @cached_property def _a ( self : Tuple ): """simple docstring""" return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def _a ( self : Tuple ): """simple docstring""" A__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def _a ( self : Union[str, Any] , **_snake_case : Optional[int] ): """simple docstring""" A__ = self.translate_src_text(**_snake_case ) assert self.expected_text == generated_words def _a ( self : Any , **_snake_case : Any ): """simple docstring""" A__ = self.tokenizer(self.src_text , **_snake_case , padding=_snake_case , return_tensors='tf' ) A__ = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=_snake_case , ) A__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_snake_case ) return generated_words @slow def _a ( self : Union[str, Any] ): """simple docstring""" self._assert_generated_batch_equal_expected()
718
from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" A__ : Union[str, Any] = ["image_processor", "tokenizer"] A__ : Optional[Any] = "BridgeTowerImageProcessor" A__ : List[Any] = ("RobertaTokenizer", "RobertaTokenizerFast") def __init__( self : List[Any] , _snake_case : Optional[Any] , _snake_case : Optional[int] ): """simple docstring""" super().__init__(_snake_case , _snake_case ) def __call__( self : List[Any] , _snake_case : int , _snake_case : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _snake_case : bool = True , _snake_case : Union[bool, str, PaddingStrategy] = False , _snake_case : Union[bool, str, TruncationStrategy] = None , _snake_case : Optional[int] = None , _snake_case : int = 0 , _snake_case : Optional[int] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[bool] = None , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = True , _snake_case : Optional[Union[str, TensorType]] = None , **_snake_case : Optional[int] , ): """simple docstring""" A__ = self.tokenizer( text=_snake_case , add_special_tokens=_snake_case , padding=_snake_case , truncation=_snake_case , max_length=_snake_case , stride=_snake_case , pad_to_multiple_of=_snake_case , return_token_type_ids=_snake_case , return_attention_mask=_snake_case , return_overflowing_tokens=_snake_case , return_special_tokens_mask=_snake_case , return_offsets_mapping=_snake_case , return_length=_snake_case , verbose=_snake_case , return_tensors=_snake_case , **_snake_case , ) # add pixel_values + pixel_mask A__ = self.image_processor( _snake_case , return_tensors=_snake_case , do_normalize=_snake_case , do_center_crop=_snake_case , **_snake_case ) encoding.update(_snake_case ) return encoding def _a ( self : Any , *_snake_case : Tuple , **_snake_case : List[Any] ): """simple docstring""" return self.tokenizer.batch_decode(*_snake_case , **_snake_case ) def _a ( self : Dict , *_snake_case : Dict , **_snake_case : List[str] ): """simple docstring""" return self.tokenizer.decode(*_snake_case , **_snake_case ) @property def _a ( self : Tuple ): """simple docstring""" A__ = self.tokenizer.model_input_names A__ = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
52
0
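A hedged usage sketch for the processor above; the checkpoint name and image file are assumptions, and the call simply merges tokenizer output with pixel_values / pixel_mask from the image processor:

from PIL import Image
from transformers import BridgeTowerProcessor

processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")  # assumed checkpoint
inputs = processor(images=Image.open("cat.png"), text="a photo of a cat", return_tensors="pt")
print(sorted(inputs.keys()))  # roughly: attention_mask, input_ids, pixel_mask, pixel_values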
'''simple docstring''' import contextlib import copy import random from typing import Any, Dict, Iterable, Optional, Union import numpy as np import torch from .utils import deprecate, is_transformers_available if is_transformers_available(): import transformers def A ( __UpperCamelCase ) -> str: random.seed(__UpperCamelCase ) np.random.seed(__UpperCamelCase ) torch.manual_seed(__UpperCamelCase ) torch.cuda.manual_seed_all(__UpperCamelCase ) # ^^ safe to call this function even if cuda is not available class __lowerCAmelCase : """simple docstring""" def __init__( self : Any , _snake_case : Iterable[torch.nn.Parameter] , _snake_case : float = 0.9999 , _snake_case : float = 0.0 , _snake_case : int = 0 , _snake_case : bool = False , _snake_case : Union[float, int] = 1.0 , _snake_case : Union[float, int] = 2 / 3 , _snake_case : Optional[Any] = None , _snake_case : Dict[str, Any] = None , **_snake_case : List[Any] , ): """simple docstring""" if isinstance(_snake_case , torch.nn.Module ): A__ = ( 'Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. ' 'Please pass the parameters of the module instead.' ) deprecate( 'passing a `torch.nn.Module` to `ExponentialMovingAverage`' , '1.0.0' , _snake_case , standard_warn=_snake_case , ) A__ = parameters.parameters() # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility A__ = True if kwargs.get('max_value' , _snake_case ) is not None: A__ = 'The `max_value` argument is deprecated. Please use `decay` instead.' deprecate('max_value' , '1.0.0' , _snake_case , standard_warn=_snake_case ) A__ = kwargs['max_value'] if kwargs.get('min_value' , _snake_case ) is not None: A__ = 'The `min_value` argument is deprecated. Please use `min_decay` instead.' deprecate('min_value' , '1.0.0' , _snake_case , standard_warn=_snake_case ) A__ = kwargs['min_value'] A__ = list(_snake_case ) A__ = [p.clone().detach() for p in parameters] if kwargs.get('device' , _snake_case ) is not None: A__ = 'The `device` argument is deprecated. Please use `to` instead.' deprecate('device' , '1.0.0' , _snake_case , standard_warn=_snake_case ) self.to(device=kwargs['device'] ) A__ = None A__ = decay A__ = min_decay A__ = update_after_step A__ = use_ema_warmup A__ = inv_gamma A__ = power A__ = 0 A__ = None # set in `step()` A__ = model_cls A__ = model_config @classmethod def _a ( cls : Optional[Any] , _snake_case : Union[str, Any] , _snake_case : str ): """simple docstring""" A__ , A__ = model_cls.load_config(_snake_case , return_unused_kwargs=_snake_case ) A__ = model_cls.from_pretrained(_snake_case ) A__ = cls(model.parameters() , model_cls=_snake_case , model_config=model.config ) ema_model.load_state_dict(_snake_case ) return ema_model def _a ( self : str , _snake_case : Dict ): """simple docstring""" if self.model_cls is None: raise ValueError('`save_pretrained` can only be used if `model_cls` was defined at __init__.' ) if self.model_config is None: raise ValueError('`save_pretrained` can only be used if `model_config` was defined at __init__.' 
) A__ = self.model_cls.from_config(self.model_config ) A__ = self.state_dict() state_dict.pop('shadow_params' , _snake_case ) model.register_to_config(**_snake_case ) self.copy_to(model.parameters() ) model.save_pretrained(_snake_case ) def _a ( self : List[Any] , _snake_case : int ): """simple docstring""" A__ = max(0 , optimization_step - self.update_after_step - 1 ) if step <= 0: return 0.0 if self.use_ema_warmup: A__ = 1 - (1 + step / self.inv_gamma) ** -self.power else: A__ = (1 + step) / (10 + step) A__ = min(_snake_case , self.decay ) # make sure decay is not smaller than min_decay A__ = max(_snake_case , self.min_decay ) return cur_decay_value @torch.no_grad() def _a ( self : int , _snake_case : Iterable[torch.nn.Parameter] ): """simple docstring""" if isinstance(_snake_case , torch.nn.Module ): A__ = ( 'Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. ' 'Please pass the parameters of the module instead.' ) deprecate( 'passing a `torch.nn.Module` to `ExponentialMovingAverage.step`' , '1.0.0' , _snake_case , standard_warn=_snake_case , ) A__ = parameters.parameters() A__ = list(_snake_case ) self.optimization_step += 1 # Compute the decay factor for the exponential moving average. A__ = self.get_decay(self.optimization_step ) A__ = decay A__ = 1 - decay A__ = contextlib.nullcontext if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): import deepspeed for s_param, param in zip(self.shadow_params , _snake_case ): if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): A__ = deepspeed.zero.GatheredParameters(_snake_case , modifier_rank=_snake_case ) with context_manager(): if param.requires_grad: s_param.sub_(one_minus_decay * (s_param - param) ) else: s_param.copy_(_snake_case ) def _a ( self : Dict , _snake_case : Iterable[torch.nn.Parameter] ): """simple docstring""" A__ = list(_snake_case ) for s_param, param in zip(self.shadow_params , _snake_case ): param.data.copy_(s_param.to(param.device ).data ) def _a ( self : Optional[int] , _snake_case : Optional[int]=None , _snake_case : List[str]=None ): """simple docstring""" A__ = [ p.to(device=_snake_case , dtype=_snake_case ) if p.is_floating_point() else p.to(device=_snake_case ) for p in self.shadow_params ] def _a ( self : Union[str, Any] ): """simple docstring""" return { "decay": self.decay, "min_decay": self.min_decay, "optimization_step": self.optimization_step, "update_after_step": self.update_after_step, "use_ema_warmup": self.use_ema_warmup, "inv_gamma": self.inv_gamma, "power": self.power, "shadow_params": self.shadow_params, } def _a ( self : Optional[Any] , _snake_case : Iterable[torch.nn.Parameter] ): """simple docstring""" A__ = [param.detach().cpu().clone() for param in parameters] def _a ( self : Tuple , _snake_case : Iterable[torch.nn.Parameter] ): """simple docstring""" if self.temp_stored_params is None: raise RuntimeError('This ExponentialMovingAverage has no `store()`ed weights ' 'to `restore()`' ) for c_param, param in zip(self.temp_stored_params , _snake_case ): param.data.copy_(c_param.data ) # Better memory-wise. 
A__ = None def _a ( self : Optional[int] , _snake_case : dict ): """simple docstring""" A__ = copy.deepcopy(_snake_case ) A__ = state_dict.get('decay' , self.decay ) if self.decay < 0.0 or self.decay > 1.0: raise ValueError('Decay must be between 0 and 1' ) A__ = state_dict.get('min_decay' , self.min_decay ) if not isinstance(self.min_decay , _snake_case ): raise ValueError('Invalid min_decay' ) A__ = state_dict.get('optimization_step' , self.optimization_step ) if not isinstance(self.optimization_step , _snake_case ): raise ValueError('Invalid optimization_step' ) A__ = state_dict.get('update_after_step' , self.update_after_step ) if not isinstance(self.update_after_step , _snake_case ): raise ValueError('Invalid update_after_step' ) A__ = state_dict.get('use_ema_warmup' , self.use_ema_warmup ) if not isinstance(self.use_ema_warmup , _snake_case ): raise ValueError('Invalid use_ema_warmup' ) A__ = state_dict.get('inv_gamma' , self.inv_gamma ) if not isinstance(self.inv_gamma , (float, int) ): raise ValueError('Invalid inv_gamma' ) A__ = state_dict.get('power' , self.power ) if not isinstance(self.power , (float, int) ): raise ValueError('Invalid power' ) A__ = state_dict.get('shadow_params' , _snake_case ) if shadow_params is not None: A__ = shadow_params if not isinstance(self.shadow_params , _snake_case ): raise ValueError('shadow_params must be a list' ) if not all(isinstance(_snake_case , torch.Tensor ) for p in self.shadow_params ): raise ValueError('shadow_params must all be Tensors' )
719
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


SCREAMING_SNAKE_CASE__ = {
    '''configuration_xlm_roberta''': [
        '''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''XLMRobertaConfig''',
        '''XLMRobertaOnnxConfig''',
    ],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ = ['''XLMRobertaTokenizer''']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ = ['''XLMRobertaTokenizerFast''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ = [
        '''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''XLMRobertaForCausalLM''',
        '''XLMRobertaForMaskedLM''',
        '''XLMRobertaForMultipleChoice''',
        '''XLMRobertaForQuestionAnswering''',
        '''XLMRobertaForSequenceClassification''',
        '''XLMRobertaForTokenClassification''',
        '''XLMRobertaModel''',
        '''XLMRobertaPreTrainedModel''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ = [
        '''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFXLMRobertaForCausalLM''',
        '''TFXLMRobertaForMaskedLM''',
        '''TFXLMRobertaForMultipleChoice''',
        '''TFXLMRobertaForQuestionAnswering''',
        '''TFXLMRobertaForSequenceClassification''',
        '''TFXLMRobertaForTokenClassification''',
        '''TFXLMRobertaModel''',
        '''TFXLMRobertaPreTrainedModel''',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ = [
        '''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''FlaxXLMRobertaForMaskedLM''',
        '''FlaxXLMRobertaForCausalLM''',
        '''FlaxXLMRobertaForMultipleChoice''',
        '''FlaxXLMRobertaForQuestionAnswering''',
        '''FlaxXLMRobertaForSequenceClassification''',
        '''FlaxXLMRobertaForTokenClassification''',
        '''FlaxXLMRobertaModel''',
        '''FlaxXLMRobertaPreTrainedModel''',
    ]


if TYPE_CHECKING:
    from .configuration_xlm_roberta import (
        XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaConfig,
        XLMRobertaOnnxConfig,
    )

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlm_roberta import XLMRobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta import (
            XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaForCausalLM,
            XLMRobertaForMaskedLM,
            XLMRobertaForMultipleChoice,
            XLMRobertaForQuestionAnswering,
            XLMRobertaForSequenceClassification,
            XLMRobertaForTokenClassification,
            XLMRobertaModel,
            XLMRobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm_roberta import (
            TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMRobertaForCausalLM,
            TFXLMRobertaForMaskedLM,
            TFXLMRobertaForMultipleChoice,
            TFXLMRobertaForQuestionAnswering,
            TFXLMRobertaForSequenceClassification,
            TFXLMRobertaForTokenClassification,
            TFXLMRobertaModel,
            TFXLMRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xlm_roberta import (
            FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxXLMRobertaForCausalLM,
            FlaxXLMRobertaForMaskedLM,
            FlaxXLMRobertaForMultipleChoice,
            FlaxXLMRobertaForQuestionAnswering,
            FlaxXLMRobertaForSequenceClassification,
            FlaxXLMRobertaForTokenClassification,
            FlaxXLMRobertaModel,
            FlaxXLMRobertaPreTrainedModel,
        )

else:
    import sys

    SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
52
0
import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def A ( __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]: assert isinstance(__UpperCamelCase , __UpperCamelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]: A__ = tmp_path / 'cache' A__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): A__ = JsonDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase , keep_in_memory=__UpperCamelCase ).read() _check_json_dataset(__UpperCamelCase , __UpperCamelCase ) @pytest.mark.parametrize( 'features' , [ None, {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}, {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'}, {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'}, {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'}, ] , ) def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> str: A__ = tmp_path / 'cache' A__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} A__ = features.copy() if features else default_expected_features A__ = ( Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) A__ = JsonDatasetReader(__UpperCamelCase , features=__UpperCamelCase , cache_dir=__UpperCamelCase ).read() _check_json_dataset(__UpperCamelCase , __UpperCamelCase ) @pytest.mark.parametrize( 'features' , [ None, {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}, ] , ) def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[Any]: A__ = tmp_path / 'cache' A__ = {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'} A__ = features.copy() if features else default_expected_features A__ = ( Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) A__ = JsonDatasetReader(__UpperCamelCase , features=__UpperCamelCase , cache_dir=__UpperCamelCase ).read() assert isinstance(__UpperCamelCase , __UpperCamelCase ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def A ( __UpperCamelCase , __UpperCamelCase ) -> List[Any]: # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"} A__ = {'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'} A__ = features.copy() A__ = ( Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) A__ = tmp_path / 'cache' A__ = JsonDatasetReader(__UpperCamelCase , features=__UpperCamelCase , cache_dir=__UpperCamelCase ).read() assert isinstance(__UpperCamelCase , __UpperCamelCase ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert 
dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] ) def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int: A__ = tmp_path / 'cache' A__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} A__ = JsonDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase , split=__UpperCamelCase ).read() _check_json_dataset(__UpperCamelCase , __UpperCamelCase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('path_type' , [str, list] ) def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[str]: if issubclass(__UpperCamelCase , __UpperCamelCase ): A__ = jsonl_path elif issubclass(__UpperCamelCase , __UpperCamelCase ): A__ = [jsonl_path] A__ = tmp_path / 'cache' A__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} A__ = JsonDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase ).read() _check_json_dataset(__UpperCamelCase , __UpperCamelCase ) def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=("train",) ) -> Optional[int]: assert isinstance(__UpperCamelCase , __UpperCamelCase ) for split in splits: A__ = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int: A__ = tmp_path / 'cache' A__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): A__ = JsonDatasetReader({'train': jsonl_path} , cache_dir=__UpperCamelCase , keep_in_memory=__UpperCamelCase ).read() _check_json_datasetdict(__UpperCamelCase , __UpperCamelCase ) @pytest.mark.parametrize( 'features' , [ None, {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}, {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'}, {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'}, {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'}, ] , ) def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]: A__ = tmp_path / 'cache' A__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} A__ = features.copy() if features else default_expected_features A__ = ( Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) A__ = JsonDatasetReader({'train': jsonl_path} , features=__UpperCamelCase , cache_dir=__UpperCamelCase ).read() _check_json_datasetdict(__UpperCamelCase , __UpperCamelCase ) @pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] ) def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Tuple: if split: A__ = {split: jsonl_path} else: A__ = 'train' A__ = {'train': jsonl_path, 'test': jsonl_path} A__ = tmp_path / 'cache' A__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} A__ = JsonDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase ).read() _check_json_datasetdict(__UpperCamelCase , __UpperCamelCase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def A ( __UpperCamelCase ) -> List[str]: return json.load(__UpperCamelCase ) def A ( __UpperCamelCase ) -> Any: return [json.loads(__UpperCamelCase ) for line in buffer] class 
__lowerCAmelCase : """simple docstring""" @pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] ) def _a ( self : int , _snake_case : Dict , _snake_case : int , _snake_case : Any ): """simple docstring""" with io.BytesIO() as buffer: JsonDatasetWriter(_snake_case , _snake_case , lines=_snake_case ).write() buffer.seek(0 ) A__ = load_json_function(_snake_case ) assert isinstance(_snake_case , _snake_case ) assert isinstance(exported_content[0] , _snake_case ) assert len(_snake_case ) == 10 @pytest.mark.parametrize( 'orient, container, keys, len_at' , [ ('records', list, {'tokens', 'labels', 'answers', 'id'}, None), ('split', dict, {'columns', 'data'}, 'data'), ('index', dict, set('0123456789' ), None), ('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'), ('values', list, None, None), ('table', dict, {'schema', 'data'}, 'data'), ] , ) def _a ( self : Optional[int] , _snake_case : Dict , _snake_case : Union[str, Any] , _snake_case : str , _snake_case : Optional[Any] , _snake_case : str ): """simple docstring""" with io.BytesIO() as buffer: JsonDatasetWriter(_snake_case , _snake_case , lines=_snake_case , orient=_snake_case ).write() buffer.seek(0 ) A__ = load_json(_snake_case ) assert isinstance(_snake_case , _snake_case ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(_snake_case , 'keys' ) and not hasattr(exported_content[0] , 'keys' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(_snake_case ) == 10 @pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] ) def _a ( self : Optional[Any] , _snake_case : List[Any] , _snake_case : Tuple , _snake_case : Tuple ): """simple docstring""" with io.BytesIO() as buffer: JsonDatasetWriter(_snake_case , _snake_case , lines=_snake_case , num_proc=2 ).write() buffer.seek(0 ) A__ = load_json_function(_snake_case ) assert isinstance(_snake_case , _snake_case ) assert isinstance(exported_content[0] , _snake_case ) assert len(_snake_case ) == 10 @pytest.mark.parametrize( 'orient, container, keys, len_at' , [ ('records', list, {'tokens', 'labels', 'answers', 'id'}, None), ('split', dict, {'columns', 'data'}, 'data'), ('index', dict, set('0123456789' ), None), ('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'), ('values', list, None, None), ('table', dict, {'schema', 'data'}, 'data'), ] , ) def _a ( self : Union[str, Any] , _snake_case : Any , _snake_case : Optional[int] , _snake_case : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : Tuple ): """simple docstring""" with io.BytesIO() as buffer: JsonDatasetWriter(_snake_case , _snake_case , lines=_snake_case , orient=_snake_case , num_proc=2 ).write() buffer.seek(0 ) A__ = load_json(_snake_case ) assert isinstance(_snake_case , _snake_case ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(_snake_case , 'keys' ) and not hasattr(exported_content[0] , 'keys' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(_snake_case ) == 10 def _a ( self : Tuple , _snake_case : Optional[int] ): """simple docstring""" with pytest.raises(_snake_case ): with io.BytesIO() as buffer: JsonDatasetWriter(_snake_case , _snake_case , num_proc=0 ) @pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] ) def _a ( self : 
Optional[int] , _snake_case : Optional[Any] , _snake_case : str , _snake_case : List[str] , _snake_case : Union[str, Any] , _snake_case : Dict ): """simple docstring""" A__ = tmp_path_factory.mktemp('data' ) / F'''test.json.{extension}''' A__ = str(shared_datadir / F'''test_file.json.{extension}''' ) JsonDatasetWriter(_snake_case , _snake_case , compression=_snake_case ).write() with fsspec.open(_snake_case , 'rb' , compression='infer' ) as f: A__ = f.read() with fsspec.open(_snake_case , 'rb' , compression='infer' ) as f: A__ = f.read() assert exported_content == original_content
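# A hedged usage sketch of the round trip these tests exercise: writing a
# `datasets.Dataset` to JSON Lines and reading it back. The file name is illustrative.
from datasets import Dataset

ds = Dataset.from_dict({'col_1': ['a', 'b'], 'col_2': [1, 2], 'col_3': [1.0, 2.0]} )
ds.to_json('roundtrip.jsonl' , lines=True )  # one JSON object per line
loaded = Dataset.from_json('roundtrip.jsonl' )  # features are inferred from the file
assert loaded.column_names == ['col_1', 'col_2', 'col_3']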
720
import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def A ( __UpperCamelCase ) -> Tuple: if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[Any]: return max(metric_fn(__UpperCamelCase , __UpperCamelCase ) for gt in ground_truths ) def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[str]: A__ = [line.strip() for line in open(__UpperCamelCase , 'r' ).readlines()] A__ = [] if args.gold_data_mode == "qa": A__ = pd.read_csv(__UpperCamelCase , sep='\t' , header=__UpperCamelCase ) for answer_list in data[1]: A__ = ast.literal_eval(__UpperCamelCase ) answers.append(__UpperCamelCase ) else: A__ = [line.strip() for line in open(__UpperCamelCase , 'r' ).readlines()] A__ = [[reference] for reference in references] A__ = A__ = A__ = 0 for prediction, ground_truths in zip(__UpperCamelCase , __UpperCamelCase ): total += 1 em += metric_max_over_ground_truths(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) fa += metric_max_over_ground_truths(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) A__ = 100.0 * em / total A__ = 100.0 * fa / total logger.info(f'''F1: {fa:.2f}''' ) logger.info(f'''EM: {em:.2f}''' ) def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[int]: A__ = args.k A__ = [line.strip() for line in open(__UpperCamelCase , 'r' ).readlines()] A__ = [line.strip() for line in open(__UpperCamelCase , 'r' ).readlines()] A__ = A__ = 0 for hypo, reference in zip(__UpperCamelCase , __UpperCamelCase ): A__ = set(hypo.split('\t' )[:k] ) A__ = set(reference.split('\t' ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k A__ = 100.0 * em / total logger.info(f'''Precision@{k}: {em: .2f}''' ) def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]: def strip_title(__UpperCamelCase ): if title.startswith('"' ): A__ = title[1:] if title.endswith('"' ): A__ = title[:-1] return title A__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __UpperCamelCase , return_tensors='pt' , padding=__UpperCamelCase , truncation=__UpperCamelCase , )['input_ids'].to(args.device ) A__ = rag_model.rag.question_encoder(__UpperCamelCase ) A__ = question_enc_outputs[0] A__ = rag_model.retriever( __UpperCamelCase , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='pt' , ) A__ = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) A__ = [] for docs in all_docs: A__ = [strip_title(__UpperCamelCase ) for title in docs['title']] provenance_strings.append('\t'.join(__UpperCamelCase ) ) return provenance_strings def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]: with torch.no_grad(): A__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __UpperCamelCase , 
return_tensors='pt' , padding=__UpperCamelCase , truncation=__UpperCamelCase ) A__ = inputs_dict.input_ids.to(args.device ) A__ = inputs_dict.attention_mask.to(args.device ) A__ = rag_model.generate( # rag_model overwrites generate __UpperCamelCase , attention_mask=__UpperCamelCase , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__UpperCamelCase , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) A__ = rag_model.retriever.generator_tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase ) if args.print_predictions: for q, a in zip(__UpperCamelCase , __UpperCamelCase ): logger.info('Q: {} - A: {}'.format(__UpperCamelCase , __UpperCamelCase ) ) return answers def A ( ) -> Any: A__ = argparse.ArgumentParser() parser.add_argument( '--model_type' , choices=['rag_sequence', 'rag_token', 'bart'] , type=__UpperCamelCase , help=( 'RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the' ' model_name_or_path' ) , ) parser.add_argument( '--index_name' , default=__UpperCamelCase , choices=['exact', 'compressed', 'legacy'] , type=__UpperCamelCase , help='RAG model retriever type' , ) parser.add_argument( '--index_path' , default=__UpperCamelCase , type=__UpperCamelCase , help='Path to the retrieval index' , ) parser.add_argument('--n_docs' , default=5 , type=__UpperCamelCase , help='Number of retrieved docs' ) parser.add_argument( '--model_name_or_path' , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help='Path to pretrained checkpoints or model identifier from huggingface.co/models' , ) parser.add_argument( '--eval_mode' , choices=['e2e', 'retrieval'] , default='e2e' , type=__UpperCamelCase , help=( 'Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates' ' precision@k.' ) , ) parser.add_argument('--k' , default=1 , type=__UpperCamelCase , help='k for the precision@k calculation' ) parser.add_argument( '--evaluation_set' , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help='Path to a file containing evaluation samples' , ) parser.add_argument( '--gold_data_path' , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help='Path to a tab-separated file with gold samples' , ) parser.add_argument( '--gold_data_mode' , default='qa' , type=__UpperCamelCase , choices=['qa', 'ans'] , help=( 'Format of the gold data file' 'qa - a single line in the following format: question [tab] answer_list' 'ans - a single line of the gold file contains the expected answer string' ) , ) parser.add_argument( '--predictions_path' , type=__UpperCamelCase , default='predictions.txt' , help='Name of the predictions file, to be stored in the checkpoints directory' , ) parser.add_argument( '--eval_all_checkpoints' , action='store_true' , help='Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number' , ) parser.add_argument( '--eval_batch_size' , default=8 , type=__UpperCamelCase , help='Batch size per GPU/CPU for evaluation.' 
, ) parser.add_argument( '--recalculate' , help='Recalculate predictions even if the prediction file exists' , action='store_true' , ) parser.add_argument( '--num_beams' , default=4 , type=__UpperCamelCase , help='Number of beams to be used when generating answers' , ) parser.add_argument('--min_length' , default=1 , type=__UpperCamelCase , help='Min length of the generated answers' ) parser.add_argument('--max_length' , default=50 , type=__UpperCamelCase , help='Max length of the generated answers' ) parser.add_argument( '--print_predictions' , action='store_true' , help='If True, prints predictions while evaluating.' , ) parser.add_argument( '--print_docs' , action='store_true' , help='If True, prints docs retried while generating.' , ) A__ = parser.parse_args() A__ = torch.device('cuda' if torch.cuda.is_available() else 'cpu' ) return args def A ( __UpperCamelCase ) -> int: A__ = {} if args.model_type is None: A__ = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith('rag' ): A__ = RagTokenForGeneration if args.model_type == 'rag_token' else RagSequenceForGeneration A__ = args.n_docs if args.index_name is not None: A__ = args.index_name if args.index_path is not None: A__ = args.index_path else: A__ = BartForConditionalGeneration A__ = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info('Evaluate the following checkpoints: %s' , __UpperCamelCase ) A__ = get_scores if args.eval_mode == 'e2e' else get_precision_at_k A__ = evaluate_batch_eae if args.eval_mode == 'e2e' else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path ) ) score_fn(__UpperCamelCase , args.predictions_path , args.gold_data_path ) continue logger.info('***** Running evaluation for {} *****'.format(__UpperCamelCase ) ) logger.info(' Batch size = %d' , args.eval_batch_size ) logger.info(' Predictions will be stored under {}'.format(args.predictions_path ) ) if args.model_type.startswith('rag' ): A__ = RagRetriever.from_pretrained(__UpperCamelCase , **__UpperCamelCase ) A__ = model_class.from_pretrained(__UpperCamelCase , retriever=__UpperCamelCase , **__UpperCamelCase ) model.retriever.init_retrieval() else: A__ = model_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase ) model.to(args.device ) with open(args.evaluation_set , 'r' ) as eval_file, open(args.predictions_path , 'w' ) as preds_file: A__ = [] for line in tqdm(__UpperCamelCase ): questions.append(line.strip() ) if len(__UpperCamelCase ) == args.eval_batch_size: A__ = evaluate_batch_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) preds_file.write('\n'.join(__UpperCamelCase ) + '\n' ) preds_file.flush() A__ = [] if len(__UpperCamelCase ) > 0: A__ = evaluate_batch_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) preds_file.write('\n'.join(__UpperCamelCase ) ) preds_file.flush() score_fn(__UpperCamelCase , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = get_args() main(args)
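# A small worked example of the `metric_max_over_ground_truths` pattern used above:
# a prediction is scored against every reference answer and the best score is kept.
def exact_match(prediction: str , ground_truth: str ) -> float:
    return float(prediction.strip().lower() == ground_truth.strip().lower() )


def max_over_refs(metric_fn , prediction , ground_truths ):
    return max(metric_fn(prediction , gt ) for gt in ground_truths )


print(max_over_refs(exact_match , 'Paris' , ['paris', 'Paris, France'] ) )  # 1.0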
52
0
import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotSmallConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html SCREAMING_SNAKE_CASE__ = '''platform''' import jax import jax.numpy as jnp from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, shift_tokens_right, ) def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , ) -> Any: if attention_mask is None: A__ = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: A__ = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: A__ = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: A__ = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: A__ = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class __lowerCAmelCase : """simple docstring""" def __init__( self : Tuple , _snake_case : Any , _snake_case : Any=13 , _snake_case : int=7 , _snake_case : List[str]=True , _snake_case : Union[str, Any]=False , _snake_case : List[str]=99 , _snake_case : List[Any]=16 , _snake_case : Union[str, Any]=2 , _snake_case : List[Any]=4 , _snake_case : Tuple=4 , _snake_case : int="gelu" , _snake_case : Union[str, Any]=0.1 , _snake_case : str=0.1 , _snake_case : List[Any]=32 , _snake_case : str=2 , _snake_case : Dict=1 , _snake_case : int=0 , _snake_case : int=0.02 , ): """simple docstring""" A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_labels A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = eos_token_id A__ = pad_token_id A__ = bos_token_id A__ = initializer_range def _a ( self : str ): """simple docstring""" A__ = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) A__ = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) A__ = shift_tokens_right(_snake_case , 1 , 2 ) A__ = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , 
pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_snake_case , ) A__ = prepare_blenderbot_inputs_dict(_snake_case , _snake_case , _snake_case ) return config, inputs_dict def _a ( self : Any ): """simple docstring""" A__ , A__ = self.prepare_config_and_inputs() return config, inputs_dict def _a ( self : Any , _snake_case : Union[str, Any] , _snake_case : int , _snake_case : str ): """simple docstring""" A__ = 20 A__ = model_class_name(_snake_case ) A__ = model.encode(inputs_dict['input_ids'] ) A__ , A__ = ( inputs_dict['decoder_input_ids'], inputs_dict['decoder_attention_mask'], ) A__ = model.init_cache(decoder_input_ids.shape[0] , _snake_case , _snake_case ) A__ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' ) A__ = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) A__ = model.decode( decoder_input_ids[:, :-1] , _snake_case , decoder_attention_mask=_snake_case , past_key_values=_snake_case , decoder_position_ids=_snake_case , ) A__ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' ) A__ = model.decode( decoder_input_ids[:, -1:] , _snake_case , decoder_attention_mask=_snake_case , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_snake_case , ) A__ = model.decode(_snake_case , _snake_case ) A__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' ) def _a ( self : Any , _snake_case : Optional[int] , _snake_case : List[Any] , _snake_case : List[Any] ): """simple docstring""" A__ = 20 A__ = model_class_name(_snake_case ) A__ = model.encode(inputs_dict['input_ids'] ) A__ , A__ = ( inputs_dict['decoder_input_ids'], inputs_dict['decoder_attention_mask'], ) A__ = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) A__ = model.init_cache(decoder_input_ids.shape[0] , _snake_case , _snake_case ) A__ = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) A__ = model.decode( decoder_input_ids[:, :-1] , _snake_case , decoder_attention_mask=_snake_case , past_key_values=_snake_case , decoder_position_ids=_snake_case , ) A__ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' ) A__ = model.decode( decoder_input_ids[:, -1:] , _snake_case , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_snake_case , decoder_position_ids=_snake_case , ) A__ = model.decode(_snake_case , _snake_case , decoder_attention_mask=_snake_case ) A__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' ) @require_flax class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" A__ : Any = 99 def _a ( self : int ): """simple docstring""" A__ = np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ] , dtype=np.intaa , ) A__ = input_ids.shape[0] 
A__ = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def _a ( self : Tuple ): """simple docstring""" A__ , A__ , A__ = self._get_config_and_data() A__ = FlaxBlenderbotSmallForConditionalGeneration(_snake_case ) A__ = lm_model(input_ids=_snake_case ) A__ = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs['logits'].shape , _snake_case ) def _a ( self : Optional[Any] ): """simple docstring""" A__ = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , ) A__ = FlaxBlenderbotSmallForConditionalGeneration(_snake_case ) A__ = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa ) A__ = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa ) A__ = lm_model(input_ids=_snake_case , decoder_input_ids=_snake_case ) A__ = (*summary.shape, config.vocab_size) self.assertEqual(outputs['logits'].shape , _snake_case ) def _a ( self : Optional[Any] ): """simple docstring""" A__ = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa ) A__ = shift_tokens_right(_snake_case , 1 , 2 ) A__ = np.equal(_snake_case , 1 ).astype(np.floataa ).sum() A__ = np.equal(_snake_case , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(_snake_case , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class __lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase , UpperCAmelCase_ ): """simple docstring""" A__ : List[str] = True A__ : Union[str, Any] = ( ( FlaxBlenderbotSmallModel, FlaxBlenderbotSmallForConditionalGeneration, ) if is_flax_available() else () ) A__ : int = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else () def _a ( self : Any ): """simple docstring""" A__ = FlaxBlenderbotSmallModelTester(self ) def _a ( self : Union[str, Any] ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(_snake_case , _snake_case , _snake_case ) def _a ( self : Optional[int] ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(_snake_case , _snake_case , _snake_case ) def _a ( self : List[Any] ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): A__ = self._prepare_for_class(_snake_case , _snake_case ) A__ = model_class(_snake_case ) @jax.jit def encode_jitted(_snake_case : str , _snake_case : int=None , **_snake_case : Union[str, Any] ): return model.encode(input_ids=_snake_case , attention_mask=_snake_case ) with self.subTest('JIT Enabled' ): A__ = encode_jitted(**_snake_case ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): A__ = encode_jitted(**_snake_case ).to_tuple() self.assertEqual(len(_snake_case ) , len(_snake_case ) ) for jitted_output, output in zip(_snake_case , _snake_case ): 
self.assertEqual(jitted_output.shape , output.shape ) def _a ( self : Tuple ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): A__ = model_class(_snake_case ) A__ = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] ) A__ = { 'decoder_input_ids': inputs_dict['decoder_input_ids'], 'decoder_attention_mask': inputs_dict['decoder_attention_mask'], 'encoder_outputs': encoder_outputs, } @jax.jit def decode_jitted(_snake_case : Optional[Any] , _snake_case : Tuple , _snake_case : Tuple ): return model.decode( decoder_input_ids=_snake_case , decoder_attention_mask=_snake_case , encoder_outputs=_snake_case , ) with self.subTest('JIT Enabled' ): A__ = decode_jitted(**_snake_case ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): A__ = decode_jitted(**_snake_case ).to_tuple() self.assertEqual(len(_snake_case ) , len(_snake_case ) ) for jitted_output, output in zip(_snake_case , _snake_case ): self.assertEqual(jitted_output.shape , output.shape ) @slow def _a ( self : List[str] ): """simple docstring""" for model_class_name in self.all_model_classes: A__ = model_class_name.from_pretrained('facebook/blenderbot_small-90M' ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids A__ = np.ones((1, 1) ) * model.config.eos_token_id A__ = model(_snake_case ) self.assertIsNotNone(_snake_case )
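# A numpy sketch of the `shift_tokens_right` behavior these tests check: decoder inputs
# are the labels shifted one position to the right, with the start token prepended.
# Replacing the -100 label-masking value with the pad token mirrors the upstream helper,
# but the exact masking convention here is an assumption.
import numpy as np

def shift_right_sketch(input_ids: np.ndarray , pad_token_id: int , start_token_id: int ) -> np.ndarray:
    shifted = np.zeros_like(input_ids )
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = start_token_id
    return np.where(shifted == -100 , pad_token_id , shifted )


print(shift_right_sketch(np.array([[5, 6, 2]] ) , pad_token_id=1 , start_token_id=2 ) )  # [[2 5 6]]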
721
import inspect import unittest from transformers import ViTHybridConfig from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class __lowerCAmelCase : """simple docstring""" def __init__( self : List[Any] , _snake_case : Any , _snake_case : Optional[int]=13 , _snake_case : Optional[Any]=64 , _snake_case : List[str]=2 , _snake_case : Any=3 , _snake_case : Union[str, Any]=True , _snake_case : Dict=True , _snake_case : int=32 , _snake_case : int=5 , _snake_case : Union[str, Any]=4 , _snake_case : int=37 , _snake_case : Tuple="gelu" , _snake_case : Optional[int]=0.1 , _snake_case : Dict=0.1 , _snake_case : List[str]=10 , _snake_case : Union[str, Any]=0.02 , _snake_case : Dict=[1, 16, 4, 4] , _snake_case : Dict=None , ): """simple docstring""" A__ = parent A__ = batch_size A__ = image_size A__ = patch_size A__ = num_channels A__ = is_training A__ = use_labels A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = type_sequence_label_size A__ = initializer_range A__ = scope A__ = backbone_featmap_shape # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) # the number of patches is based on the feature map of the backbone, which by default uses an output stride # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size A__ = (self.image_size // 32) ** 2 A__ = num_patches + 1 def _a ( self : Any ): """simple docstring""" A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A__ = self.get_config() return config, pixel_values, labels def _a ( self : Tuple ): """simple docstring""" A__ = { 'global_padding': 'same', 'layer_type': 'bottleneck', 'depths': [3, 4, 9], 'out_features': ['stage1', 'stage2', 'stage3'], 'embedding_dynamic_padding': True, 'hidden_sizes': [4, 8, 16, 32], 'num_groups': 2, } return ViTHybridConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_snake_case , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=_snake_case , ) def _a ( self : int , _snake_case : Optional[int] , _snake_case : Union[str, Any] , _snake_case : Optional[int] ): """simple docstring""" A__ = ViTHybridModel(config=_snake_case ) model.to(_snake_case ) model.eval() A__ = model(_snake_case ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : List[str] , _snake_case : str , _snake_case : Union[str, Any] , _snake_case : Any ): """simple docstring""" A__ = self.type_sequence_label_size A__ = ViTHybridForImageClassification(_snake_case ) model.to(_snake_case ) model.eval() A__ = model(_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _a ( self : Dict ): """simple docstring""" A__ = self.prepare_config_and_inputs() A__ , A__ , A__ = config_and_inputs A__ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" A__ : Union[str, Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () A__ : str = ( {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification} if is_torch_available() else {} ) A__ : Union[str, Any] = False A__ : Any = False A__ : Union[str, Any] = False def _a ( self : Dict ): """simple docstring""" A__ = ViTHybridModelTester(self ) A__ = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=37 ) def _a ( self : int ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='ViT does not use inputs_embeds' ) def _a ( self : int ): """simple docstring""" pass def _a ( self : int ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(_snake_case ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_snake_case , nn.Linear ) ) def _a ( self : List[str] ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(_snake_case ) A__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ['pixel_values'] self.assertListEqual(arg_names[:1] , _snake_case ) def _a ( self : Any ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def _a ( self : str ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_snake_case ) def _a ( self : Any ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = _config_zero_init(_snake_case ) for model_class in self.all_model_classes: A__ = model_class(config=_snake_case ) # Skip the check for the backbone for name, module in model.named_modules(): if module.__class__.__name__ == "ViTHybridPatchEmbeddings": A__ = [F'''{name}.{key}''' for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @slow def _a ( self : int ): """simple docstring""" for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = ViTHybridModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) def A ( ) 
-> Union[str, Any]: A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def _a ( self : Tuple ): """simple docstring""" return ( ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _a ( self : Optional[Any] ): """simple docstring""" A__ = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( _snake_case ) A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=_snake_case , return_tensors='pt' ).to(_snake_case ) # forward pass with torch.no_grad(): A__ = model(**_snake_case ) # verify the logits A__ = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , _snake_case ) A__ = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(_snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _snake_case , atol=1E-4 ) ) @slow @require_accelerate def _a ( self : List[Any] ): """simple docstring""" A__ = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' ) A__ = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' ) A__ = prepare_img() A__ = image_processor(images=_snake_case , return_tensors='pt' ) A__ = model(**_snake_case ) A__ = outputs.logits # model predicts one of the 1000 ImageNet classes A__ = logits.argmax(-1 ).item() self.assertTrue(model.config.idalabel[predicted_class_idx] , 'tabby, tabby cat' )
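# A quick arithmetic check of the sequence length the tester above derives from the
# backbone's output stride of 32 (sizes are illustrative).
image_size = 64
num_patches = (image_size // 32) ** 2  # 2 * 2 = 4 feature-map positions
seq_length = num_patches + 1  # plus one for the [CLS] token
assert seq_length == 5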
52
0
def A ( __UpperCamelCase ) -> bool:
    return __UpperCamelCase & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
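# Sanity checks for the bitwise parity test above: `n & 1` inspects the lowest bit,
# which also behaves correctly for negative integers in Python.
assert (4 & 1) == 0 and (7 & 1) == 1
assert (-2 & 1) == 0 and (-3 & 1) == 1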
700
def A ( __UpperCamelCase ) -> bool:
    return __UpperCamelCase & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
52
0
import baseaa


def baseaa_encode(__UpperCamelCase ) -> bytes:
    return baseaa.baaencode(__UpperCamelCase.encode('utf-8' ) )


def baseaa_decode(__UpperCamelCase ) -> str:
    return baseaa.baadecode(__UpperCamelCase ).decode('utf-8' )


if __name__ == "__main__":
    test = '''Hello World!'''
    encoded = baseaa_encode(test)
    print(encoded)
    decoded = baseaa_decode(encoded)
    print(decoded)
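# The module and function names above follow this dataset's renaming scheme; a runnable
# equivalent with the real standard library looks like this (shown with `b85encode` as an
# assumption -- `b64encode` has the same shape).
import base64

payload = 'Hello World!'.encode('utf-8' )
encoded = base64.b85encode(payload )
assert base64.b85decode(encoded ).decode('utf-8' ) == 'Hello World!'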
701
from typing import Dict

from .base import GenericTensor, Pipeline


class __lowerCAmelCase ( UpperCAmelCase_ ):
    """simple docstring"""

    def _a ( self : Any , _snake_case : str=None , _snake_case : Dict=None , _snake_case : Any=None , **_snake_case : str ):
        """simple docstring"""
        if tokenize_kwargs is None:
            A__ = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' )
            A__ = truncation
        A__ = tokenize_kwargs

        A__ = {}
        if return_tensors is not None:
            A__ = return_tensors
        return preprocess_params, {}, postprocess_params

    def _a ( self : Any , _snake_case : Dict , **_snake_case : Optional[Any] ):
        """simple docstring"""
        A__ = self.framework
        A__ = self.tokenizer(_snake_case , return_tensors=_snake_case , **_snake_case )
        return model_inputs

    def _a ( self : List[Any] , _snake_case : Dict ):
        """simple docstring"""
        A__ = self.model(**_snake_case )
        return model_outputs

    def _a ( self : Optional[Any] , _snake_case : List[Any] , _snake_case : str=False ):
        """simple docstring"""
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__( self : Dict , *_snake_case : int , **_snake_case : List[str] ):
        """simple docstring"""
        return super().__call__(*_snake_case , **_snake_case )
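# A hedged usage sketch for the feature-extraction pipeline defined above, via the public
# `transformers.pipeline` factory. The checkpoint name is an illustrative assumption.
from transformers import pipeline

extractor = pipeline('feature-extraction' , model='distilbert-base-uncased' )
features = extractor('This is a test' )
# `features` is a nested list shaped (1, num_tokens, hidden_size)
print(len(features[0] ) , len(features[0][0] ) )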
52
0
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


SCREAMING_SNAKE_CASE__ = {
    '''configuration_informer''': [
        '''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''InformerConfig''',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ = [
        '''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''InformerForPrediction''',
        '''InformerModel''',
        '''InformerPreTrainedModel''',
    ]


if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )

else:
    import sys

    SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
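# A minimal sketch of the module-level lazy-import pattern the init file above relies on:
# attributes are resolved on first access through PEP 562's module-level `__getattr__`
# instead of being imported eagerly. Names are illustrative.
import importlib

_import_structure = {'configuration_informer': ['InformerConfig']}


def __getattr__(name ):
    for module_name, exported in _import_structure.items():
        if name in exported:
            module = importlib.import_module(F'''.{module_name}''' , __name__ )
            return getattr(module , name )
    raise AttributeError(F'''module {__name__!r} has no attribute {name!r}''' )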
702
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__) def A ( __UpperCamelCase , __UpperCamelCase ) -> List[Any]: return (preds == labels).mean() @dataclass class __lowerCAmelCase : """simple docstring""" A__ : str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) A__ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) A__ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) A__ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) @dataclass class __lowerCAmelCase : """simple docstring""" A__ : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} ) A__ : str = field(metadata={"help": "Should contain the data files for the task."} ) A__ : int = field( default=1_28 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) A__ : bool = field( default=UpperCAmelCase_ , metadata={"help": "Overwrite the cached training and evaluation sets"} ) def A ( ) -> Any: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) A__ , A__ , A__ = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' ' --overwrite_output_dir to overcome.' 
) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , __UpperCamelCase ) # Set seed set_seed(training_args.seed ) try: A__ = processors[data_args.task_name]() A__ = processor.get_labels() A__ = len(__UpperCamelCase ) except KeyError: raise ValueError('Task not found: %s' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. A__ = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__UpperCamelCase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) A__ = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) A__ = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__UpperCamelCase , cache_dir=model_args.cache_dir , ) # Get datasets A__ = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=__UpperCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) A__ = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=__UpperCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(__UpperCamelCase ) -> Dict: A__ = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(__UpperCamelCase , p.label_ids )} # Data collator A__ = DataCollatorWithPadding(__UpperCamelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer A__ = Trainer( model=__UpperCamelCase , args=__UpperCamelCase , train_dataset=__UpperCamelCase , eval_dataset=__UpperCamelCase , compute_metrics=__UpperCamelCase , data_collator=__UpperCamelCase , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation A__ = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) A__ = trainer.evaluate() A__ = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_master(): with open(__UpperCamelCase , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): 
logger.info(' %s = %s' , __UpperCamelCase , __UpperCamelCase ) writer.write('%s = %s\n' % (key, value) ) results.update(__UpperCamelCase ) return results def A ( __UpperCamelCase ) -> List[Any]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
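# A tiny worked example of the `simple_accuracy` metric used above: elementwise equality
# averaged over the batch.
import numpy as np

preds = np.array([0, 1, 2, 1] )
labels = np.array([0, 1, 1, 1] )
assert (preds == labels).mean() == 0.75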
52
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)

SCREAMING_SNAKE_CASE__ = {
    '''microsoft/trocr-base-handwritten''': (
        '''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class __lowerCAmelCase ( UpperCAmelCase_ ):
    """simple docstring"""

    A__ : Dict = "trocr"
    A__ : List[Any] = ["past_key_values"]
    A__ : List[Any] = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__( self : Tuple , _snake_case : Any=5_02_65 , _snake_case : Optional[Any]=10_24 , _snake_case : List[Any]=12 , _snake_case : List[Any]=16 , _snake_case : str=40_96 , _snake_case : List[str]="gelu" , _snake_case : List[Any]=5_12 , _snake_case : Dict=0.1 , _snake_case : Any=0.0 , _snake_case : str=0.0 , _snake_case : List[str]=2 , _snake_case : Tuple=0.02 , _snake_case : int=0.0 , _snake_case : Dict=True , _snake_case : List[str]=False , _snake_case : Optional[int]=True , _snake_case : Any=True , _snake_case : List[Any]=1 , _snake_case : List[Any]=0 , _snake_case : Union[str, Any]=2 , **_snake_case : Optional[int] , ):
        """simple docstring"""
        A__ = vocab_size
        A__ = d_model
        A__ = decoder_layers
        A__ = decoder_attention_heads
        A__ = decoder_ffn_dim
        A__ = activation_function
        A__ = max_position_embeddings
        A__ = dropout
        A__ = attention_dropout
        A__ = activation_dropout
        A__ = init_std
        A__ = decoder_layerdrop
        A__ = use_cache
        A__ = scale_embedding
        A__ = use_learned_position_embeddings
        A__ = layernorm_embedding

        super().__init__(
            pad_token_id=_snake_case ,
            bos_token_id=_snake_case ,
            eos_token_id=_snake_case ,
            decoder_start_token_id=_snake_case ,
            **_snake_case ,
        )
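# A hedged sketch of how a config like the one above is typically round-tripped. The
# `TrOCRConfig` name is inferred from the "trocr" model type, since identifiers in this
# file are renamed; `to_dict`/`from_dict` are the standard `transformers` entry points.
from transformers import TrOCRConfig

config = TrOCRConfig(d_model=256 , decoder_layers=2 , decoder_attention_heads=4 )
restored = TrOCRConfig.from_dict(config.to_dict() )
assert restored.d_model == 256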
703
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available SCREAMING_SNAKE_CASE__ = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ '''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MraForMaskedLM''', '''MraForMultipleChoice''', '''MraForQuestionAnswering''', '''MraForSequenceClassification''', '''MraForTokenClassification''', '''MraLayer''', '''MraModel''', '''MraPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mra import ( MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraLayer, MraModel, MraPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
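As an illustration of the lazy pattern above: the torch-backed symbols are only materialized on first attribute access. A small sketch, assuming the module lives at transformers.models.mra:

from transformers.models import mra

# Importing the package itself is cheap; this attribute access is what
# actually triggers loading of configuration_mra behind the _LazyModule.
config_cls = mra.MraConfig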
52
0
import os


def solution():
    """Find the greatest product of four adjacent numbers (right, down, or
    diagonally) in the 20x20 grid read from grid.txt."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp

    return maximum


if __name__ == "__main__":
    print(solution())
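The same four-direction scan can be exercised without grid.txt; a self-contained toy example (the 4x4 grid below is made up):

grid = [
    [1, 2, 3, 4],
    [5, 6, 7, 8],
    [9, 1, 2, 3],
    [4, 5, 6, 7],
]
best = 0
n = len(grid)
for i in range(n):
    for j in range(n):
        if j + 3 < n:  # right
            best = max(best, grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3])
        if i + 3 < n:  # down
            best = max(best, grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j])
        if i + 3 < n and j + 3 < n:  # diagonal down-right
            best = max(best, grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3])
        if i + 3 < n and j >= 3:  # diagonal down-left
            best = max(best, grid[i][j] * grid[i + 1][j - 1] * grid[i + 2][j - 2] * grid[i + 3][j - 3])
print(best)  # 1680, from the row 5 * 6 * 7 * 8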
704
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
    webbrowser.open(link)
52
0
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = {'''tokenizer_file''': '''tokenizer.json'''} SCREAMING_SNAKE_CASE__ = { '''tokenizer_file''': { '''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''', '''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''', '''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''', '''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''', '''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''', '''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''', '''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''', }, } class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" A__ : Tuple = VOCAB_FILES_NAMES A__ : int = PRETRAINED_VOCAB_FILES_MAP A__ : Dict = ["input_ids", "attention_mask"] A__ : Optional[Any] = None def __init__( self : Dict , _snake_case : str=None , _snake_case : int=None , _snake_case : str=None , _snake_case : Union[str, Any]="<unk>" , _snake_case : str="<s>" , _snake_case : int="</s>" , _snake_case : Tuple="<pad>" , _snake_case : Dict=False , _snake_case : int=False , **_snake_case : str , ): """simple docstring""" super().__init__( _snake_case , _snake_case , tokenizer_file=_snake_case , unk_token=_snake_case , bos_token=_snake_case , eos_token=_snake_case , pad_token=_snake_case , add_prefix_space=_snake_case , clean_up_tokenization_spaces=_snake_case , **_snake_case , ) A__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , _snake_case ) != add_prefix_space: A__ = getattr(_snake_case , pre_tok_state.pop('type' ) ) A__ = add_prefix_space A__ = pre_tok_class(**_snake_case ) A__ = add_prefix_space def _a ( self : Union[str, Any] , *_snake_case : List[Any] , **_snake_case : Union[str, Any] ): """simple docstring""" A__ = kwargs.get('is_split_into_words' , _snake_case ) if not (self.add_prefix_space or not is_split_into_words): raise Exception( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with''' ' pretokenized inputs.' ) return super()._batch_encode_plus(*_snake_case , **_snake_case ) def _a ( self : str , *_snake_case : Optional[int] , **_snake_case : Any ): """simple docstring""" A__ = kwargs.get('is_split_into_words' , _snake_case ) if not (self.add_prefix_space or not is_split_into_words): raise Exception( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with''' ' pretokenized inputs.' 
) return super()._encode_plus(*_snake_case , **_snake_case ) def _a ( self : int , _snake_case : str , _snake_case : Optional[str] = None ): """simple docstring""" A__ = self._tokenizer.model.save(_snake_case , name=_snake_case ) return tuple(_snake_case ) def _a ( self : Tuple , _snake_case : "Conversation" ): """simple docstring""" A__ = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(_snake_case , add_special_tokens=_snake_case ) + [self.eos_token_id] ) if len(_snake_case ) > self.model_max_length: A__ = input_ids[-self.model_max_length :] return input_ids
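A minimal usage sketch, assuming the class above is exported as transformers' BloomTokenizerFast; as the guard above enforces, pretokenized input requires add_prefix_space=True:

from transformers import BloomTokenizerFast

tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m", add_prefix_space=True)
# is_split_into_words=True would raise without add_prefix_space=True (see the check above)
enc = tok(["Hello", "world"], is_split_into_words=True)
print(enc.input_ids)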
705
import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" A__ : Any = IFInpaintingPipeline A__ : Dict = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"} A__ : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS A__ : Dict = PipelineTesterMixin.required_optional_params - {"latents"} def _a ( self : Any ): """simple docstring""" return self._get_dummy_components() def _a ( self : Optional[int] , _snake_case : Any , _snake_case : str=0 ): """simple docstring""" if str(_snake_case ).startswith('mps' ): A__ = torch.manual_seed(_snake_case ) else: A__ = torch.Generator(device=_snake_case ).manual_seed(_snake_case ) A__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case ) A__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case ) A__ = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def _a ( self : Dict ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def _a ( self : int ): """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def _a ( self : Optional[int] ): """simple docstring""" super().test_save_load_floataa(expected_max_diff=1E-1 ) def _a ( self : List[str] ): """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def _a ( self : Dict ): """simple docstring""" self._test_save_load_local() def _a ( self : Optional[int] ): """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
52
0
'''simple docstring''' # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.utils import ComputeEnvironment from .cluster import get_cluster_input from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 from .sagemaker import get_sagemaker_input SCREAMING_SNAKE_CASE__ = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine''' def A ( ) -> Optional[Any]: A__ = _ask_options( 'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , ) if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: A__ = get_sagemaker_input() else: A__ = get_cluster_input() return config def A ( __UpperCamelCase=None ) -> Dict: if subparsers is not None: A__ = subparsers.add_parser('config' , description=__UpperCamelCase ) else: A__ = argparse.ArgumentParser('Accelerate config command' , description=__UpperCamelCase ) parser.add_argument( '--config_file' , default=__UpperCamelCase , help=( 'The path to use to store the config file. Will default to a file named default_config.yaml in the cache ' 'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ' 'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ' 'with \'huggingface\'.' ) , ) if subparsers is not None: parser.set_defaults(func=__UpperCamelCase ) return parser def A ( __UpperCamelCase ) -> str: A__ = get_user_input() if args.config_file is not None: A__ = args.config_file else: if not os.path.isdir(__UpperCamelCase ): os.makedirs(__UpperCamelCase ) A__ = default_yaml_config_file if config_file.endswith('.json' ): config.to_json_file(__UpperCamelCase ) else: config.to_yaml_file(__UpperCamelCase ) print(f'''accelerate configuration saved at {config_file}''' ) def A ( ) -> List[str]: A__ = config_command_parser() A__ = parser.parse_args() config_command(__UpperCamelCase ) if __name__ == "__main__": main()
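Illustrative CLI usage of the command wired up above (the subcommand name and the --config_file flag are taken directly from the parser definition):

#   accelerate config                                 # interactive prompts, saved to the default path
#   accelerate config --config_file ./my_config.yaml  # save to an explicit file (.yaml or .json)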
706
import inspect import jax import jax.lax as lax import jax.numpy as jnp from ..utils import add_start_docstrings from ..utils.logging import get_logger SCREAMING_SNAKE_CASE__ = get_logger(__name__) SCREAMING_SNAKE_CASE__ = r''' Args: input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs (`Dict[str, Any]`, *optional*): Additional logits processor specific kwargs. Return: `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores. ''' class __lowerCAmelCase : """simple docstring""" @add_start_docstrings(_snake_case ) def __call__( self : Optional[int] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray ): """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class __lowerCAmelCase : """simple docstring""" @add_start_docstrings(_snake_case ) def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray ): """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" @add_start_docstrings(_snake_case ) def __call__( self : Any , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int , **_snake_case : Optional[int] ): """simple docstring""" for processor in self: A__ = inspect.signature(processor.__call__ ).parameters if len(_snake_case ) > 3: if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ): raise ValueError( F'''Make sure that all the required parameters: {list(function_args.keys() )} for ''' F'''{processor.__class__} are passed to the logits processor.''' ) A__ = processor(_snake_case , _snake_case , _snake_case , **_snake_case ) else: A__ = processor(_snake_case , _snake_case , _snake_case ) return scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Any , _snake_case : float ): """simple docstring""" if not isinstance(_snake_case , _snake_case ) or not (temperature > 0): raise ValueError(F'''`temperature` has to be a strictly positive float, but is {temperature}''' ) A__ = temperature def __call__( self : str , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ): """simple docstring""" A__ = scores / self.temperature return scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Optional[Any] , _snake_case : float , _snake_case : float = -float('Inf' ) , _snake_case : int = 1 ): """simple docstring""" if not isinstance(_snake_case , _snake_case ) or (top_p < 0 or top_p > 1.0): raise ValueError(F'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' ) if not isinstance(_snake_case , _snake_case ) or (min_tokens_to_keep < 1): raise ValueError(F'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' ) A__ = top_p A__ = filter_value A__ = min_tokens_to_keep def __call__( self : str , _snake_case : jnp.ndarray , _snake_case 
: jnp.ndarray , _snake_case : int ): """simple docstring""" A__ , A__ = lax.top_k(_snake_case , scores.shape[-1] ) A__ = jnp.full_like(_snake_case , self.filter_value ) A__ = jax.nn.softmax(_snake_case , axis=-1 ).cumsum(axis=-1 ) A__ = cumulative_probs < self.top_p # include the token that is higher than top_p as well A__ = jnp.roll(_snake_case , 1 ) score_mask |= score_mask.at[:, 0].set(_snake_case ) # min tokens to keep A__ = score_mask.at[:, : self.min_tokens_to_keep].set(_snake_case ) A__ = jnp.where(_snake_case , _snake_case , _snake_case ) A__ = jax.lax.sort_key_val(_snake_case , _snake_case )[-1] return next_scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Union[str, Any] , _snake_case : int , _snake_case : float = -float('Inf' ) , _snake_case : int = 1 ): """simple docstring""" if not isinstance(_snake_case , _snake_case ) or top_k <= 0: raise ValueError(F'''`top_k` has to be a strictly positive integer, but is {top_k}''' ) A__ = max(_snake_case , _snake_case ) A__ = filter_value def __call__( self : Optional[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ): """simple docstring""" A__ , A__ = scores.shape A__ = jnp.full(batch_size * vocab_size , self.filter_value ) A__ = min(self.top_k , scores.shape[-1] ) # Safety check A__ , A__ = lax.top_k(_snake_case , _snake_case ) A__ = jnp.broadcast_to((jnp.arange(_snake_case ) * vocab_size)[:, None] , (batch_size, topk) ).flatten() A__ = topk_scores.flatten() A__ = topk_indices.flatten() + shift A__ = next_scores_flat.at[topk_indices_flat].set(_snake_case ) A__ = next_scores_flat.reshape(_snake_case , _snake_case ) return next_scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Any , _snake_case : int ): """simple docstring""" A__ = bos_token_id def __call__( self : Optional[int] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ): """simple docstring""" A__ = jnp.full(scores.shape , -float('inf' ) ) A__ = 1 - jnp.bool_(cur_len - 1 ) A__ = jnp.where(_snake_case , new_scores.at[:, self.bos_token_id].set(0 ) , _snake_case ) return scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Any , _snake_case : int , _snake_case : int ): """simple docstring""" A__ = max_length A__ = eos_token_id def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ): """simple docstring""" A__ = jnp.full(scores.shape , -float('inf' ) ) A__ = 1 - jnp.bool_(cur_len - self.max_length + 1 ) A__ = jnp.where(_snake_case , new_scores.at[:, self.eos_token_id].set(0 ) , _snake_case ) return scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Dict , _snake_case : int , _snake_case : int ): """simple docstring""" if not isinstance(_snake_case , _snake_case ) or min_length < 0: raise ValueError(F'''`min_length` has to be a positive integer, but is {min_length}''' ) if not isinstance(_snake_case , _snake_case ) or eos_token_id < 0: raise ValueError(F'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' ) A__ = min_length A__ = eos_token_id def __call__( self : int , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ): """simple docstring""" A__ = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 ) A__ = jnp.where(_snake_case , scores.at[:, self.eos_token_id].set(-float('inf' ) ) , _snake_case ) return scores class __lowerCAmelCase ( UpperCAmelCase_ ): 
"""simple docstring""" def __init__( self : int , _snake_case : Tuple , _snake_case : Union[str, Any] ): """simple docstring""" A__ = list(_snake_case ) A__ = begin_index def __call__( self : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : str , _snake_case : int ): """simple docstring""" A__ = 1 - jnp.bool_(cur_len - self.begin_index ) A__ = jnp.where(_snake_case , scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) , _snake_case ) return scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : int , _snake_case : list ): """simple docstring""" A__ = list(_snake_case ) def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ): """simple docstring""" A__ = scores.at[..., self.suppress_tokens].set(-float('inf' ) ) return scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : List[str] , _snake_case : Optional[Any] ): """simple docstring""" A__ = dict(_snake_case ) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have a negative value. A__ = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1 for index, token in force_token_map.items(): if token is not None: A__ = force_token_array.at[index].set(_snake_case ) A__ = jnp.intaa(_snake_case ) def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ): """simple docstring""" def _force_token(_snake_case : Dict ): A__ = scores.shape[0] A__ = self.force_token_array[generation_idx] A__ = jnp.ones_like(_snake_case , dtype=scores.dtype ) * -float('inf' ) A__ = jnp.zeros((batch_size, 1) , dtype=scores.dtype ) A__ = lax.dynamic_update_slice(_snake_case , _snake_case , (0, current_token) ) return new_scores A__ = lax.cond( cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond( self.force_token_array[cur_len] >= 0 , lambda: _force_token(_snake_case ) , lambda: scores , ) , ) return scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : Dict , _snake_case : List[Any] ): """simple docstring""" A__ = generate_config.eos_token_id A__ = generate_config.no_timestamps_token_id A__ = generate_config.no_timestamps_token_id + 1 A__ = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(_snake_case , 'max_initial_timestamp_index' ): A__ = generate_config.max_initial_timestamp_index else: A__ = model_config.vocab_size if self.max_initial_timestamp_index is None: A__ = model_config.vocab_size def __call__( self : Tuple , _snake_case : List[Any] , _snake_case : Dict , _snake_case : Dict ): """simple docstring""" A__ = scores.at[:, self.no_timestamps_token_id].set(-float('inf' ) ) def handle_pairs(_snake_case : Dict , _snake_case : str ): A__ = jnp.where((cur_len - self.begin_index) >= 1 , _snake_case , _snake_case ) A__ = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , _snake_case , ) A__ = jnp.where((cur_len - self.begin_index) < 2 , _snake_case , _snake_case ) A__ = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin , _snake_case , _snake_case , ) return jnp.where( _snake_case , jnp.where( 
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('inf' ) ) , scores_k.at[: self.eos_token_id].set(-float('inf' ) ) , ) , _snake_case , ) A__ = jax.vmap(_snake_case )(_snake_case , _snake_case ) A__ = jnp.where(cur_len == self.begin_index , _snake_case , _snake_case ) A__ = jnp.where( self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , _snake_case , ) A__ = self.timestamp_begin + self.max_initial_timestamp_index A__ = jnp.where( _snake_case , scores.at[:, last_allowed + 1 :].set(-float('inf' ) ) , _snake_case , ) # if sum of probability over timestamps is above any other token, sample timestamp A__ = jax.nn.log_softmax(_snake_case , axis=-1 ) def handle_cumulative_probs(_snake_case : List[Any] , _snake_case : Union[str, Any] ): A__ = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 ) A__ = jnp.max(logprobs_k[: self.timestamp_begin] ) return jnp.where( timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('inf' ) ) , _snake_case , ) A__ = jax.vmap(_snake_case )(_snake_case , _snake_case ) return scores
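The nucleus (top-p) mask built above can be illustrated with plain NumPy; a toy sketch of the same cumulative-probability logic (the logits below are made up):

import numpy as np

logits = np.array([2.0, 1.0, 0.5, -1.0])
top_p = 0.9
order = np.argsort(-logits)                       # sort descending, as lax.top_k does
probs = np.exp(logits[order]) / np.exp(logits).sum()
cum = np.cumsum(probs)
keep = np.roll(cum < top_p, 1)                    # include the token that crosses top_p
keep[0] = True                                    # always keep the most likely token
filtered = np.full_like(logits, -np.inf)          # the filter_value
filtered[order[keep]] = logits[order[keep]]
print(filtered)  # [ 2.   1.   0.5 -inf]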
52
0
import os import sys import unittest SCREAMING_SNAKE_CASE__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path SCREAMING_SNAKE_CASE__ = os.path.join(git_repo_path, '''src''', '''transformers''') SCREAMING_SNAKE_CASE__ = ''' {0} = None ''' SCREAMING_SNAKE_CASE__ = ''' class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) ''' SCREAMING_SNAKE_CASE__ = ''' def {0}(*args, **kwargs): requires_backends({0}, {1}) ''' class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def _a ( self : Union[str, Any] ): """simple docstring""" A__ = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")' ) self.assertIsNone(_snake_case ) A__ = find_backend(' if not is_tokenizers_available():' ) self.assertEqual(_snake_case , 'tokenizers' ) A__ = find_backend(' if not is_tensorflow_text_available():' ) self.assertEqual(_snake_case , 'tensorflow_text' ) A__ = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):' ) self.assertEqual(_snake_case , 'sentencepiece_and_tokenizers' ) A__ = find_backend( ' if not (is_sentencepiece_available() and is_tensorflow_text_available()):' ) self.assertEqual(_snake_case , 'sentencepiece_and_tensorflow_text' ) A__ = find_backend( ' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):' ) self.assertEqual(_snake_case , 'sentencepiece_and_tokenizers_and_vision' ) def _a ( self : str ): """simple docstring""" A__ = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('torch' , _snake_case ) self.assertIn('tensorflow_text' , _snake_case ) self.assertIn('sentencepiece_and_tokenizers' , _snake_case ) # Likewise, we can't assert on the exact content of a key self.assertIn('BertModel' , objects['torch'] ) self.assertIn('TFBertModel' , objects['tf'] ) self.assertIn('FlaxBertModel' , objects['flax'] ) self.assertIn('BertModel' , objects['torch'] ) self.assertIn('TFBertTokenizer' , objects['tensorflow_text'] ) self.assertIn('convert_slow_tokenizer' , objects['sentencepiece_and_tokenizers'] ) def _a ( self : Union[str, Any] ): """simple docstring""" A__ = create_dummy_object('CONSTANT' , '\'torch\'' ) self.assertEqual(_snake_case , '\nCONSTANT = None\n' ) A__ = create_dummy_object('function' , '\'torch\'' ) self.assertEqual( _snake_case , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' ) A__ = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n' A__ = create_dummy_object('FakeClass' , '\'torch\'' ) self.assertEqual(_snake_case , _snake_case ) def _a ( self : List[Any] ): """simple docstring""" A__ = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n' A__ = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} ) 
self.assertEqual(dummy_files['torch'] , _snake_case )
707
import argparse
import struct
import unittest


class SHA256:
    """Class for the entire SHA-256 hashing pipeline."""

    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initialize hash values
        self.hashes = [
            0x6A09E667,
            0xBB67AE85,
            0x3C6EF372,
            0xA54FF53A,
            0x510E527F,
            0x9B05688C,
            0x1F83D9AB,
            0x5BE0CD19,
        ]
        # Initialize round constants
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B, 0x59F111F1,
            0x923F82A4, 0xAB1C5ED5, 0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3,
            0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174, 0xE49B69C1, 0xEFBE4786,
            0x0FC19DC6, 0x240CA1CC, 0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7, 0xC6E00BF3, 0xD5A79147,
            0x06CA6351, 0x14292967, 0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13,
            0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85, 0xA2BFE8A1, 0xA81A664B,
            0xC24B8B70, 0xC76C51A3, 0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5, 0x391C0CB3, 0x4ED8AA4A,
            0x5B9CCA4F, 0x682E6FF3, 0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208,
            0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 zero-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    (d + temp1) % 0x100000000,
                    c,
                    b,
                    a,
                    (temp1 + temp2) % 0x100000000,
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                (element + mutated_hash_values[index]) % 0x100000000
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit value by the given number of rotations."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)


class SHA256HashTest(unittest.TestCase):
    """Test class for the SHA256 class."""

    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
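A quick sanity check and CLI usage for the module above; the script filename below is a placeholder:

import hashlib

msg = b"Test String"
print(SHA256(msg).hash == hashlib.sha256(msg).hexdigest())  # True, mirroring the unit test

# Command-line equivalents of the parser defined in main():
#   python sha256.py --string "Hello World!! Welcome to Cryptography"
#   python sha256.py --file ./some_file.bin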
52
0
from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeqaSeqLM, TFAutoModelForSpeechSeqaSeq, TFAutoModelForVisionaSeq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def _a ( self : Union[str, Any] ): """simple docstring""" A__ = tf.convert_to_tensor( [ [ 8.222_0991, # 3rd highest value; idx. 0 -0.562_0044, 5.2322_9752, 4.038_6393, -6.879_8378, -0.5478_5802, -3.201_2153, 2.9277_7176, 1.8817_1953, 7.3534_1276, # 5th highest value; idx. 9 8.4320_7833, # 2nd highest value; idx. 10 -9.8571_1836, -5.9620_9236, -1.1303_9161, -7.111_5294, -0.836_9633, -5.318_6408, 7.0642_7407, 0.8136_9344, -0.8202_3817, -5.917_9796, 0.5881_3443, -6.9977_8438, 4.7155_1189, -0.1877_1637, 7.4402_0759, # 4th highest value; idx. 25 9.3845_0987, # 1st highest value; idx. 26 2.1266_2941, -9.3256_2038, 2.3565_2522, ], # cummulative prob of 5 highest values <= 0.6 [ 0.5842_5518, 4.5313_9238, -5.5751_0464, -6.2803_0699, -7.1952_9503, -4.0212_2551, 1.3933_7037, -6.0670_7057, 1.5948_0517, -9.64_3119, 0.0390_7799, 0.6723_1762, -8.8820_6726, 6.2711_5922, # 4th highest value; idx. 13 2.2852_0723, 4.8276_7506, 4.3042_1368, 8.827_5313, # 2nd highest value; idx. 17 5.4402_9958, # 5th highest value; idx. 18 -4.473_5794, 7.3857_9536, # 3rd highest value; idx. 20 -2.9105_1663, 2.6194_6077, -2.567_4762, -9.4895_9302, -4.0292_2645, -1.3541_6918, 9.6770_2323, # 1st highest value; idx. 
27 -5.8947_8553, 1.8537_0467, ], # cummulative prob of 5 highest values <= 0.6 ] , dtype=tf.floataa , ) A__ = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above A__ = tf.convert_to_tensor( [8.22_2099, 7.353_4126, 8.43_2078, 7.440_2075, 9.3_8451, 6.27_1159, 8.82_7531, 5.440_2995, 7.385_7956, 9.67_7023] , dtype=tf.floataa , ) # expected non filtered values as noted above A__ = tf_top_k_top_p_filtering(_snake_case , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 ) A__ = output[output != -float('inf' )] A__ = tf.cast( tf.where(tf.not_equal(_snake_case , tf.constant(-float('inf' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , ) tf.debugging.assert_near(_snake_case , _snake_case , rtol=1E-12 ) tf.debugging.assert_equal(_snake_case , _snake_case ) @require_tf class __lowerCAmelCase ( unittest.TestCase , UpperCAmelCase_ ): """simple docstring""" if is_tf_available(): A__ : List[Any] = { "AutoModelForCausalLM": TFAutoModelForCausalLM, "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq, "AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM, "AutoModelForVision2Seq": TFAutoModelForVisionaSeq, "LogitsProcessorList": TFLogitsProcessorList, "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor, "create_tensor_fn": tf.convert_to_tensor, "floats_tensor": floats_tensor, "return_tensors": "tf", } @slow def _a ( self : str ): """simple docstring""" A__ = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) A__ = 2 A__ = 2 class __lowerCAmelCase ( tf.Module ): """simple docstring""" def __init__( self : Any , _snake_case : str ): """simple docstring""" super(_snake_case , self ).__init__() A__ = model @tf.function( input_signature=( tf.TensorSpec((None, input_length) , tf.intaa , name='input_ids' ), tf.TensorSpec((None, input_length) , tf.intaa , name='attention_mask' ), ) , jit_compile=_snake_case , ) def _a ( self : Optional[int] , _snake_case : Dict , _snake_case : List[str] ): """simple docstring""" A__ = self.model.generate( input_ids=_snake_case , attention_mask=_snake_case , max_new_tokens=_snake_case , return_dict_in_generate=_snake_case , ) return {"sequences": outputs["sequences"]} A__ = [[2, 0], [1_02, 1_03]] A__ = [[1, 0], [1, 1]] A__ = DummyModel(model=_snake_case ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(_snake_case , _snake_case , signatures={'serving_default': dummy_model.serving} ) A__ = tf.saved_model.load(_snake_case ).signatures['serving_default'] for batch_size in range(1 , len(_snake_case ) + 1 ): A__ = { 'input_ids': tf.constant(dummy_input_ids[:batch_size] ), 'attention_mask': tf.constant(dummy_attention_masks[:batch_size] ), } A__ = serving_func(**_snake_case )['sequences'] A__ = test_model.generate(**_snake_case , max_new_tokens=_snake_case ) tf.debugging.assert_equal(_snake_case , _snake_case ) @slow def _a ( self : Tuple ): """simple docstring""" A__ = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) A__ = 1 A__ = 2 class __lowerCAmelCase ( tf.Module ): """simple docstring""" def __init__( self : Optional[int] , _snake_case : Tuple ): """simple docstring""" super(_snake_case , self ).__init__() A__ = model @tf.function( input_signature=( tf.TensorSpec((batch_size, None) , tf.intaa , name='input_ids' ), tf.TensorSpec((batch_size, None) , tf.intaa , name='attention_mask' ), ) , jit_compile=_snake_case , ) def _a ( self : Tuple , _snake_case : Any , _snake_case : List[Any] ): 
"""simple docstring""" A__ = self.model.generate( input_ids=_snake_case , attention_mask=_snake_case , max_new_tokens=_snake_case , return_dict_in_generate=_snake_case , ) return {"sequences": outputs["sequences"]} A__ = [[2], [1_02, 1_03]] A__ = [[1], [1, 1]] A__ = DummyModel(model=_snake_case ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(_snake_case , _snake_case , signatures={'serving_default': dummy_model.serving} ) A__ = tf.saved_model.load(_snake_case ).signatures['serving_default'] for input_row in range(len(_snake_case ) ): A__ = { 'input_ids': tf.constant([dummy_input_ids[input_row]] ), 'attention_mask': tf.constant([dummy_attention_masks[input_row]] ), } A__ = serving_func(**_snake_case )['sequences'] A__ = test_model.generate(**_snake_case , max_new_tokens=_snake_case ) tf.debugging.assert_equal(_snake_case , _snake_case ) @slow @require_tensorflow_text def _a ( self : Optional[Any] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id='google/flan-t5-small' , filename='spiece.model' , local_dir=_snake_case ) class __lowerCAmelCase ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : Dict ): """simple docstring""" super().__init__() A__ = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(_snake_case , 'spiece.model' ) , 'rb' ).read() ) A__ = TFAutoModelForSeqaSeqLM.from_pretrained('hf-internal-testing/tiny-random-t5' ) def _a ( self : Any , _snake_case : Tuple , *_snake_case : Optional[Any] , **_snake_case : Union[str, Any] ): """simple docstring""" A__ = self.tokenizer.tokenize(_snake_case ) A__ , A__ = text.pad_model_inputs( _snake_case , max_seq_length=64 , pad_value=self.model.config.pad_token_id ) A__ = self.model.generate(input_ids=_snake_case , attention_mask=_snake_case ) return self.tokenizer.detokenize(_snake_case ) A__ = CompleteSentenceTransformer() A__ = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='inputs' ) A__ = complete_model(_snake_case ) A__ = tf.keras.Model(_snake_case , _snake_case ) keras_model.save(_snake_case ) def _a ( self : List[str] ): """simple docstring""" A__ = { 'do_sample': True, 'num_beams': 1, 'top_p': 0.7, 'top_k': 10, 'temperature': 0.7, } A__ = 14 A__ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) A__ = 'Hello, my dog is cute and' A__ = tokenizer(_snake_case , return_tensors='tf' ) A__ = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) A__ = 6_38 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(':/CPU:0' ): tf.random.set_seed(0 ) A__ = model.generate(**_snake_case , eos_token_id=_snake_case , **_snake_case ) self.assertTrue(expectation == len(generated_tokens[0] ) ) A__ = [6_38, 1_98] with tf.device(':/CPU:0' ): tf.random.set_seed(0 ) A__ = model.generate(**_snake_case , eos_token_id=_snake_case , **_snake_case ) self.assertTrue(expectation == len(generated_tokens[0] ) ) def _a ( self : Union[str, Any] ): """simple docstring""" A__ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bart' ) A__ = 'Hugging Face is a technology company based in New York and Paris.' 
A__ = bart_tokenizer(_snake_case , return_tensors='tf' ).input_ids A__ = TFBartForConditionalGeneration.from_pretrained('hf-internal-testing/tiny-random-bart' ) A__ = bart_model.generate(_snake_case ).numpy() class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def _a ( self : List[str] , _snake_case : List[Any] , _snake_case : Dict=None , **_snake_case : int ): """simple docstring""" return super().call(_snake_case , **_snake_case ) A__ = FakeBart.from_pretrained('hf-internal-testing/tiny-random-bart' ) A__ = bart_model.generate(_snake_case , foo='bar' ).numpy() self.assertTrue(np.array_equal(_snake_case , _snake_case ) ) class __lowerCAmelCase ( bart_model.model.encoder.__class__ ): """simple docstring""" def _a ( self : str , _snake_case : Tuple , **_snake_case : str ): """simple docstring""" return super().call(_snake_case , **_snake_case ) A__ = FakeEncoder(bart_model.config , bart_model.model.shared ) A__ = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) A__ = bart_model.generate(_snake_case ).numpy() with self.assertRaises(_snake_case ): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(_snake_case , foo='bar' )
708
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of value, or its derivative when deriv is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Return the value found after the forward propagation training."""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
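An illustrative run; the exact output depends on the random initial weight, but with enough propagations the result should converge toward the expected value:

import random

random.seed(0)  # any fixed seed; the trajectory still depends on the initial weight
print(round(forward_propagation(32, 450_000)))  # converges to ~32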
52
0
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { '''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''', } class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_): """simple docstring""" A__ : str = "bit" A__ : Any = ["preactivation", "bottleneck"] A__ : Optional[int] = ["SAME", "VALID"] def __init__( self : Dict , _snake_case : List[str]=3 , _snake_case : Optional[Any]=64 , _snake_case : Dict=[2_56, 5_12, 10_24, 20_48] , _snake_case : Any=[3, 4, 6, 3] , _snake_case : Union[str, Any]="preactivation" , _snake_case : Optional[int]="relu" , _snake_case : Optional[Any]=None , _snake_case : str=32 , _snake_case : Dict=0.0 , _snake_case : Any=False , _snake_case : List[str]=32 , _snake_case : Optional[int]=1 , _snake_case : Optional[Any]=None , _snake_case : Optional[Any]=None , **_snake_case : int , ): """simple docstring""" super().__init__(**_snake_case ) if layer_type not in self.layer_types: raise ValueError(F'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' ) if global_padding is not None: if global_padding.upper() in self.supported_padding: A__ = global_padding.upper() else: raise ValueError(F'''Padding strategy {global_padding} not supported''' ) A__ = num_channels A__ = embedding_size A__ = hidden_sizes A__ = depths A__ = layer_type A__ = hidden_act A__ = global_padding A__ = num_groups A__ = drop_path_rate A__ = embedding_dynamic_padding A__ = output_stride A__ = width_factor A__ = ['stem'] + [F'''stage{idx}''' for idx in range(1 , len(_snake_case ) + 1 )] A__ , A__ = get_aligned_output_features_output_indices( out_features=_snake_case , out_indices=_snake_case , stage_names=self.stage_names )
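A minimal usage sketch, assuming the class above is exported as transformers' BitConfig; the argument values come from the layer_types and supported_padding sets defined above:

from transformers import BitConfig

config = BitConfig(layer_type="bottleneck", global_padding="same")
print(config.global_padding)  # "SAME" (upper-cased by the padding check above)
print(config.stage_names)     # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']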
709
import unittest from transformers import is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow if is_flax_available(): import optax from flax.training.common_utils import onehot from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration from transformers.models.ta.modeling_flax_ta import shift_tokens_right @require_torch @require_sentencepiece @require_tokenizers @require_flax class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def _a ( self : int ): """simple docstring""" A__ = FlaxMTaForConditionalGeneration.from_pretrained('google/mt5-small' ) A__ = AutoTokenizer.from_pretrained('google/mt5-small' ) A__ = tokenizer('Hello there' , return_tensors='np' ).input_ids A__ = tokenizer('Hi I am' , return_tensors='np' ).input_ids A__ = shift_tokens_right(_snake_case , model.config.pad_token_id , model.config.decoder_start_token_id ) A__ = model(_snake_case , decoder_input_ids=_snake_case ).logits A__ = optax.softmax_cross_entropy(_snake_case , onehot(_snake_case , logits.shape[-1] ) ).mean() A__ = -(labels.shape[-1] * loss.item()) A__ = -84.9127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
52
0
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" @slow @require_torch def _a ( self : Union[str, Any] ): """simple docstring""" A__ = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' ) A__ = BertTokenizer.from_pretrained('bert-base-uncased' ) A__ = bertabert.config.encoder.vocab_size A__ = tokenizer.sep_token_id A__ = tokenizer.cls_token_id A__ = 1_28 A__ = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' ) A__ = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' ) A__ = train_dataset.select(range(32 ) ) A__ = val_dataset.select(range(16 ) ) A__ = 4 def _map_to_encoder_decoder_inputs(_snake_case : Optional[Any] ): # Tokenizer will automatically set [BOS] <text> [EOS] A__ = tokenizer(batch['article'] , padding='max_length' , truncation=_snake_case , max_length=5_12 ) A__ = tokenizer(batch['highlights'] , padding='max_length' , truncation=_snake_case , max_length=1_28 ) A__ = inputs.input_ids A__ = inputs.attention_mask A__ = outputs.input_ids A__ = outputs.input_ids.copy() A__ = [ [-1_00 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels'] ] A__ = outputs.attention_mask assert all(len(_snake_case ) == 5_12 for x in inputs.input_ids ) assert all(len(_snake_case ) == 1_28 for x in outputs.input_ids ) return batch def _compute_metrics(_snake_case : Union[str, Any] ): A__ = pred.label_ids A__ = pred.predictions # all unnecessary tokens are removed A__ = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case ) A__ = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case ) A__ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_snake_case ) )] ) / len(_snake_case ) return {"accuracy": accuracy} # map train dataset A__ = train_dataset.map( _map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=['article', 'highlights'] , ) train_dataset.set_format( type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , ) # same for validation dataset A__ = val_dataset.map( _map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=['article', 'highlights'] , ) val_dataset.set_format( type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , ) A__ = self.get_auto_remove_tmp_dir() A__ = SeqaSeqTrainingArguments( output_dir=_snake_case , per_device_train_batch_size=_snake_case , per_device_eval_batch_size=_snake_case , predict_with_generate=_snake_case , evaluation_strategy='steps' , do_train=_snake_case , do_eval=_snake_case , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer A__ = SeqaSeqTrainer( model=_snake_case , args=_snake_case , compute_metrics=_compute_metrics , train_dataset=_snake_case , eval_dataset=_snake_case , tokenizer=_snake_case , ) # start training trainer.train()
710
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''', '''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''', } class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" A__ : List[str] = "roberta" def __init__( self : List[str] , _snake_case : Union[str, Any]=5_02_65 , _snake_case : List[Any]=7_68 , _snake_case : List[str]=12 , _snake_case : List[str]=12 , _snake_case : Any=30_72 , _snake_case : Union[str, Any]="gelu" , _snake_case : int=0.1 , _snake_case : Union[str, Any]=0.1 , _snake_case : Tuple=5_12 , _snake_case : Union[str, Any]=2 , _snake_case : Any=0.02 , _snake_case : Any=1E-12 , _snake_case : List[Any]=1 , _snake_case : int=0 , _snake_case : Any=2 , _snake_case : Optional[Any]="absolute" , _snake_case : int=True , _snake_case : Any=None , **_snake_case : Any , ): """simple docstring""" super().__init__(pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case ) A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = hidden_act A__ = intermediate_size A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_vocab_size A__ = initializer_range A__ = layer_norm_eps A__ = position_embedding_type A__ = use_cache A__ = classifier_dropout class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" @property def _a ( self : Dict ): """simple docstring""" if self.task == "multiple-choice": A__ = {0: 'batch', 1: 'choice', 2: 'sequence'} else: A__ = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
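A minimal sketch, assuming these classes back transformers' RobertaConfig and its ONNX config; the defaults in the __init__ above correspond to the roberta-base checkpoint:

from transformers import RobertaConfig

config = RobertaConfig()         # vocab_size=50265, hidden_size=768, ...
print(config.num_hidden_layers)  # 12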
52
0
import itertools import random import unittest import numpy as np from transformers import BatchFeature, SpeechTaFeatureExtractor from transformers.testing_utils import require_torch from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch SCREAMING_SNAKE_CASE__ = random.Random() def A ( __UpperCamelCase , __UpperCamelCase=1.0 , __UpperCamelCase=None , __UpperCamelCase=None ) -> Any: if rng is None: A__ = global_rng A__ = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : Any , _snake_case : Union[str, Any] , _snake_case : Tuple=7 , _snake_case : Tuple=4_00 , _snake_case : Any=20_00 , _snake_case : Dict=1 , _snake_case : int=0.0 , _snake_case : List[str]=1_60_00 , _snake_case : Dict=True , _snake_case : Dict=80 , _snake_case : List[str]=16 , _snake_case : Union[str, Any]=64 , _snake_case : Optional[Any]="hann_window" , _snake_case : Any=80 , _snake_case : Tuple=76_00 , _snake_case : Any=1E-10 , _snake_case : str=True , ): """simple docstring""" A__ = parent A__ = batch_size A__ = min_seq_length A__ = max_seq_length A__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) A__ = feature_size A__ = padding_value A__ = sampling_rate A__ = do_normalize A__ = num_mel_bins A__ = hop_length A__ = win_length A__ = win_function A__ = fmin A__ = fmax A__ = mel_floor A__ = return_attention_mask def _a ( self : Union[str, Any] ): """simple docstring""" return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "do_normalize": self.do_normalize, "num_mel_bins": self.num_mel_bins, "hop_length": self.hop_length, "win_length": self.win_length, "win_function": self.win_function, "fmin": self.fmin, "fmax": self.fmax, "mel_floor": self.mel_floor, "return_attention_mask": self.return_attention_mask, } def _a ( self : int , _snake_case : Union[str, Any]=False , _snake_case : Union[str, Any]=False ): """simple docstring""" def _flatten(_snake_case : int ): return list(itertools.chain(*_snake_case ) ) if equal_length: A__ = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size A__ = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: A__ = [np.asarray(_snake_case ) for x in speech_inputs] return speech_inputs def _a ( self : Tuple , _snake_case : Any=False , _snake_case : str=False ): """simple docstring""" if equal_length: A__ = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size A__ = [ floats_list((x, self.num_mel_bins) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: A__ = [np.asarray(_snake_case ) for x in speech_inputs] return speech_inputs @require_torch class __lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" A__ : Dict = SpeechTaFeatureExtractor def _a ( self : Optional[Any] ): """simple docstring""" A__ = SpeechTaFeatureExtractionTester(self ) def _a ( self : Dict , _snake_case : Any ): """simple docstring""" self.assertTrue(np.all(np.mean(_snake_case , axis=0 ) < 1E-3 ) ) 
self.assertTrue(np.all(np.abs(np.var(_snake_case , axis=0 ) - 1 ) < 1E-3 ) ) def _a ( self : Optional[int] ): """simple docstring""" A__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 A__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] A__ = [np.asarray(_snake_case ) for speech_input in speech_inputs] # Test not batched input A__ = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values A__ = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1E-3 ) ) # Test batched A__ = feat_extract(_snake_case , return_tensors='np' ).input_values A__ = feat_extract(_snake_case , return_tensors='np' ).input_values for enc_seq_a, enc_seq_a in zip(_snake_case , _snake_case ): self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1E-3 ) ) def _a ( self : Optional[int] ): """simple docstring""" A__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) A__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] A__ = ['longest', 'max_length', 'do_not_pad'] A__ = [None, 16_00, None] for max_length, padding in zip(_snake_case , _snake_case ): A__ = feat_extract(_snake_case , padding=_snake_case , max_length=_snake_case , return_tensors='np' ) A__ = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:8_00] ) self.assertTrue(input_values[0][8_00:].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_values[1][:10_00] ) self.assertTrue(input_values[0][10_00:].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_values[2][:12_00] ) def _a ( self : List[Any] ): """simple docstring""" A__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) A__ = range(8_00 , 14_00 , 2_00 ) A__ = [floats_list((1, x) )[0] for x in lengths] A__ = ['longest', 'max_length', 'do_not_pad'] A__ = [None, 16_00, None] for max_length, padding in zip(_snake_case , _snake_case ): A__ = feat_extract(_snake_case , max_length=_snake_case , padding=_snake_case ) A__ = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:8_00] ) self._check_zero_mean_unit_variance(input_values[1][:10_00] ) self._check_zero_mean_unit_variance(input_values[2][:12_00] ) def _a ( self : Optional[int] ): """simple docstring""" A__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) A__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] A__ = feat_extract( _snake_case , truncation=_snake_case , max_length=10_00 , padding='max_length' , return_tensors='np' ) A__ = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :8_00] ) self._check_zero_mean_unit_variance(input_values[1] ) self._check_zero_mean_unit_variance(input_values[2] ) def _a ( self : str ): """simple docstring""" A__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) A__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] A__ = feat_extract( _snake_case , truncation=_snake_case , max_length=10_00 , padding='longest' , return_tensors='np' ) A__ = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :8_00] ) self._check_zero_mean_unit_variance(input_values[1, :10_00] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertTrue(input_values.shape == (3, 
10_00) ) A__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] A__ = feat_extract( _snake_case , truncation=_snake_case , max_length=20_00 , padding='longest' , return_tensors='np' ) A__ = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :8_00] ) self._check_zero_mean_unit_variance(input_values[1, :10_00] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length > longest -> then pad to longest self.assertTrue(input_values.shape == (3, 12_00) ) def _a ( self : Any ): """simple docstring""" A__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) A__ = np.random.rand(1_00 ).astype(np.floataa ) A__ = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: A__ = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) A__ = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def _a ( self : Dict ): """simple docstring""" A__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 A__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] A__ = [np.asarray(_snake_case ) for speech_input in speech_inputs] # Test feature size A__ = feature_extractor(audio_target=_snake_case , padding=_snake_case , return_tensors='np' ).input_values self.assertTrue(input_values.ndim == 3 ) self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins ) # Test not batched input A__ = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_values A__ = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_values self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1E-3 ) ) # Test batched A__ = feature_extractor(_snake_case , return_tensors='np' ).input_values A__ = feature_extractor(_snake_case , return_tensors='np' ).input_values for enc_seq_a, enc_seq_a in zip(_snake_case , _snake_case ): self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. 
A__ = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] A__ = np.asarray(_snake_case ) A__ = feature_extractor(_snake_case , return_tensors='np' ).input_values A__ = feature_extractor(_snake_case , return_tensors='np' ).input_values for enc_seq_a, enc_seq_a in zip(_snake_case , _snake_case ): self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1E-3 ) ) def _a ( self : Optional[int] ): """simple docstring""" A__ = self.feat_extract_tester.prepare_inputs_for_target() A__ = self.feature_extraction_class(**self.feat_extract_dict ) A__ = feat_extract.model_input_names[0] A__ = BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(_snake_case ) == len(_snake_case ) for x, y in zip(_snake_case , processed_features[input_name] ) ) ) A__ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_snake_case ) A__ = BatchFeature({input_name: speech_inputs} , tensor_type='np' ) A__ = processed_features[input_name] if len(batch_features_input.shape ) < 3: A__ = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) ) @require_torch def _a ( self : Tuple ): """simple docstring""" A__ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_snake_case ) A__ = self.feature_extraction_class(**self.feat_extract_dict ) A__ = feat_extract.model_input_names[0] A__ = BatchFeature({input_name: speech_inputs} , tensor_type='pt' ) A__ = processed_features[input_name] if len(batch_features_input.shape ) < 3: A__ = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) ) @require_torch def _a ( self : Any ): """simple docstring""" A__ = self.feature_extraction_class(**self.feat_extract_dict ) A__ = self.feat_extract_tester.prepare_inputs_for_target() A__ = feat_extract.model_input_names[0] A__ = BatchFeature({input_name: speech_inputs} ) A__ = feat_extract.num_mel_bins # hack! A__ = feat_extract.pad(_snake_case , padding='longest' , return_tensors='np' )[input_name] A__ = feat_extract.pad(_snake_case , padding='longest' , return_tensors='pt' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 ) def _a ( self : List[Any] ): """simple docstring""" A__ = self.feat_extract_dict A__ = True A__ = self.feature_extraction_class(**_snake_case ) A__ = self.feat_extract_tester.prepare_inputs_for_target() A__ = [len(_snake_case ) for x in speech_inputs] A__ = feat_extract.model_input_names[0] A__ = BatchFeature({input_name: speech_inputs} ) A__ = feat_extract.num_mel_bins # hack! A__ = feat_extract.pad(_snake_case , padding='longest' , return_tensors='np' ) self.assertIn('attention_mask' , _snake_case ) self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) ) self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _snake_case ) def _a ( self : Any ): """simple docstring""" A__ = self.feat_extract_dict A__ = True A__ = self.feature_extraction_class(**_snake_case ) A__ = self.feat_extract_tester.prepare_inputs_for_target() A__ = [len(_snake_case ) for x in speech_inputs] A__ = feat_extract.model_input_names[0] A__ = BatchFeature({input_name: speech_inputs} ) A__ = min(_snake_case ) A__ = feat_extract.num_mel_bins # hack! 
A__ = feat_extract.pad( _snake_case , padding='max_length' , max_length=_snake_case , truncation=_snake_case , return_tensors='np' ) self.assertIn('attention_mask' , _snake_case ) self.assertListEqual( list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] ) def _a ( self : List[str] , _snake_case : int ): """simple docstring""" from datasets import load_dataset A__ = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' ) # automatic decoding with librispeech A__ = ds.sort('id' ).select(range(_snake_case ) )[:num_samples]['audio'] return [x["array"] for x in speech_samples] def _a ( self : Dict ): """simple docstring""" A__ = torch.tensor( [2.38_04E-03, 2.07_52E-03, 1.98_36E-03, 2.10_57E-03, 1.61_74E-03, 3.05_18E-04, 9.15_53E-05, 3.35_69E-04, 9.76_56E-04, 1.83_11E-03, 2.01_42E-03, 2.10_57E-03, 1.73_95E-03, 4.57_76E-04, -3.96_73E-04, 4.57_76E-04, 1.00_71E-03, 9.15_53E-05, 4.88_28E-04, 1.15_97E-03, 7.32_42E-04, 9.46_04E-04, 1.80_05E-03, 1.83_11E-03, 8.85_01E-04, 4.27_25E-04, 4.88_28E-04, 7.32_42E-04, 1.09_86E-03, 2.10_57E-03] ) # fmt: on A__ = self._load_datasamples(1 ) A__ = SpeechTaFeatureExtractor() A__ = feature_extractor(_snake_case , return_tensors='pt' ).input_values self.assertEquals(input_values.shape , (1, 9_36_80) ) self.assertTrue(torch.allclose(input_values[0, :30] , _snake_case , atol=1E-6 ) ) def _a ( self : str ): """simple docstring""" A__ = torch.tensor( [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777, -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386, -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571, -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998] ) # fmt: on A__ = self._load_datasamples(1 ) A__ = SpeechTaFeatureExtractor() A__ = feature_extractor(audio_target=_snake_case , return_tensors='pt' ).input_values self.assertEquals(input_values.shape , (1, 3_66, 80) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , _snake_case , atol=1E-4 ) )
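# Hedged usage sketch (comment-only, not part of the test suite): how the
# extractor exercised above is typically driven. `SpeechTaFeatureExtractor`
# follows this file's import; upstream the class is `SpeechT5FeatureExtractor`.
#
#   import numpy as np
#   extractor = SpeechTaFeatureExtractor()
#   audio = np.zeros(16_000, dtype=np.float32)            # 1 s of silence
#   feats = extractor(audio, sampling_rate=16_000, return_tensors="np")
#   feats.input_values.shape   # (1, 16000): normalized waveform path
#   mel = extractor(audio_target=audio, return_tensors="np")
#   mel.input_values.shape     # (1, frames, 80): log-mel target path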
import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" A__ : int = LongformerTokenizer A__ : Optional[int] = True A__ : Any = LongformerTokenizerFast A__ : Dict = True def _a ( self : int ): """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A__ = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] A__ = dict(zip(_snake_case , range(len(_snake_case ) ) ) ) A__ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] A__ = {'unk_token': '<unk>'} A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(_snake_case ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(_snake_case ) ) def _a ( self : int , **_snake_case : Union[str, Any] ): """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **_snake_case ) def _a ( self : Optional[int] , **_snake_case : List[Any] ): """simple docstring""" kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_snake_case ) def _a ( self : Any , _snake_case : Optional[Any] ): """simple docstring""" A__ = 'lower newer' A__ = 'lower newer' return input_text, output_text def _a ( self : Any ): """simple docstring""" A__ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) A__ = 'lower newer' A__ = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er'] A__ = tokenizer.tokenize(_snake_case ) # , add_prefix_space=True) self.assertListEqual(_snake_case , _snake_case ) A__ = tokens + [tokenizer.unk_token] A__ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , _snake_case ) def _a ( self : List[str] ): """simple docstring""" A__ = self.get_tokenizer() self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=_snake_case ) , [0, 3_14_14, 2_32, 3_28, 2] ) self.assertListEqual( tokenizer.encode('Hello world! 
cécé herlolip 418' , add_special_tokens=_snake_case ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , ) @slow def _a ( self : List[Any] ): """simple docstring""" A__ = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' ) A__ = tokenizer.encode('sequence builders' , add_special_tokens=_snake_case ) A__ = tokenizer.encode('multi-sequence build' , add_special_tokens=_snake_case ) A__ = tokenizer.encode( 'sequence builders' , add_special_tokens=_snake_case , add_prefix_space=_snake_case ) A__ = tokenizer.encode( 'sequence builders' , 'multi-sequence build' , add_special_tokens=_snake_case , add_prefix_space=_snake_case ) A__ = tokenizer.build_inputs_with_special_tokens(_snake_case ) A__ = tokenizer.build_inputs_with_special_tokens(_snake_case , _snake_case ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def _a ( self : List[str] ): """simple docstring""" A__ = self.get_tokenizer() A__ = 'Encode this sequence.' A__ = tokenizer.byte_encoder[' '.encode('utf-8' )[0]] # Testing encoder arguments A__ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case , add_prefix_space=_snake_case ) A__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(_snake_case , _snake_case ) A__ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case , add_prefix_space=_snake_case ) A__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(_snake_case , _snake_case ) tokenizer.add_special_tokens({'bos_token': '<s>'} ) A__ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case ) A__ = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(_snake_case , _snake_case ) # Testing spaces after special tokens A__ = '<mask>' tokenizer.add_special_tokens( {'mask_token': AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case )} ) # mask token has a left space A__ = tokenizer.convert_tokens_to_ids(_snake_case ) A__ = 'Encode <mask> sequence' A__ = 'Encode <mask>sequence' A__ = tokenizer.encode(_snake_case ) A__ = encoded.index(_snake_case ) A__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(_snake_case , _snake_case ) A__ = tokenizer.encode(_snake_case ) A__ = encoded.index(_snake_case ) A__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(_snake_case , _snake_case ) def _a ( self : Dict ): """simple docstring""" pass def _a ( self : Union[str, Any] ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): A__ = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case ) A__ = self.tokenizer_class.from_pretrained(_snake_case , **_snake_case ) A__ = 'A, <mask> AllenNLP sentence.' 
A__ = tokenizer_r.encode_plus(_snake_case , add_special_tokens=_snake_case , return_token_type_ids=_snake_case ) A__ = tokenizer_p.encode_plus(_snake_case , add_special_tokens=_snake_case , return_token_type_ids=_snake_case ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , ) A__ = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] ) A__ = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( _snake_case , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) self.assertSequenceEqual( _snake_case , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) def _a ( self : List[Any] ): """simple docstring""" for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): A__ = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) A__ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['add_prefix_space'] , _snake_case ) self.assertEqual(post_processor_state['add_prefix_space'] , _snake_case ) self.assertEqual(post_processor_state['trim_offsets'] , _snake_case ) def _a ( self : Optional[Any] ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): A__ = 'hello' # `hello` is a token in the vocabulary of `pretrained_name` A__ = F'''{text_of_1_token} {text_of_1_token}''' A__ = self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_snake_case ) + 1, len(_snake_case ) + 1 + len(_snake_case )) , ) A__ = self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_snake_case ) + 1, len(_snake_case ) + 1 + len(_snake_case )) , ) A__ = self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_snake_case ), len(_snake_case ) + 1 + len(_snake_case )) , ) A__ = 
self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_snake_case ), len(_snake_case ) + 1 + len(_snake_case )) , ) A__ = F''' {text}''' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) A__ = self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_snake_case ) + 1, 1 + len(_snake_case ) + 1 + len(_snake_case )) , ) A__ = self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_snake_case ), 1 + len(_snake_case ) + 1 + len(_snake_case )) , ) A__ = self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_snake_case ), 1 + len(_snake_case ) + 1 + len(_snake_case )) , )
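# Hedged sketch (comment-only) of what the toy byte-level BPE vocab built in
# setUp produces; it mirrors the tokenization assertions above, with "Ġ"
# (\u0120) marking a token that begins after a space:
#
#   tokenizer = LongformerTokenizer(vocab_file, merges_file, unk_token="<unk>")
#   tokenizer.tokenize("lower newer")
#   # -> ['l', 'o', 'w', 'er', 'Ġ', 'n', 'e', 'w', 'er']
#   tokenizer.convert_tokens_to_ids(tokens + ["<unk>"])
#   # -> [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]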
'''simple docstring''' import copy import json import os import tempfile from transformers import is_torch_available from .test_configuration_utils import config_common_kwargs class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : List[str] , _snake_case : Optional[int] , _snake_case : Optional[Any]=None , _snake_case : Dict=True , _snake_case : Union[str, Any]=None , **_snake_case : int ): """simple docstring""" A__ = parent A__ = config_class A__ = has_text_modality A__ = kwargs A__ = common_properties def _a ( self : Tuple ): """simple docstring""" A__ = self.config_class(**self.inputs_dict ) A__ = ( ['hidden_size', 'num_attention_heads', 'num_hidden_layers'] if self.common_properties is None else self.common_properties ) # Add common fields for text models if self.has_text_modality: common_properties.extend(['vocab_size'] ) # Test that config has the common properties as getters for prop in common_properties: self.parent.assertTrue(hasattr(_snake_case , _snake_case ) , msg=F'''`{prop}` does not exist''' ) # Test that config has the common properties as setter for idx, name in enumerate(_snake_case ): try: setattr(_snake_case , _snake_case , _snake_case ) self.parent.assertEqual( getattr(_snake_case , _snake_case ) , _snake_case , msg=F'''`{name} value {idx} expected, but was {getattr(_snake_case , _snake_case )}''' ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass # Test if config class can be called with Config(prop_name=..) for idx, name in enumerate(_snake_case ): try: A__ = self.config_class(**{name: idx} ) self.parent.assertEqual( getattr(_snake_case , _snake_case ) , _snake_case , msg=F'''`{name} value {idx} expected, but was {getattr(_snake_case , _snake_case )}''' ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass def _a ( self : Optional[int] ): """simple docstring""" A__ = self.config_class(**self.inputs_dict ) A__ = json.loads(config.to_json_string() ) for key, value in self.inputs_dict.items(): self.parent.assertEqual(obj[key] , _snake_case ) def _a ( self : Optional[int] ): """simple docstring""" A__ = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: A__ = os.path.join(_snake_case , 'config.json' ) config_first.to_json_file(_snake_case ) A__ = self.config_class.from_json_file(_snake_case ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def _a ( self : List[str] ): """simple docstring""" A__ = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: config_first.save_pretrained(_snake_case ) A__ = self.config_class.from_pretrained(_snake_case ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def _a ( self : Dict ): """simple docstring""" A__ = self.config_class(**self.inputs_dict ) A__ = 'test' with tempfile.TemporaryDirectory() as tmpdirname: A__ = os.path.join(_snake_case , _snake_case ) config_first.save_pretrained(_snake_case ) A__ = self.config_class.from_pretrained(_snake_case , subfolder=_snake_case ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def _a ( self : Dict ): """simple docstring""" A__ = self.config_class(**self.inputs_dict , num_labels=5 ) self.parent.assertEqual(len(config.idalabel ) , 5 ) self.parent.assertEqual(len(config.labelaid ) , 5 ) A__ = 3 
self.parent.assertEqual(len(config.idalabel ) , 3 ) self.parent.assertEqual(len(config.labelaid ) , 3 ) def _a ( self : List[Any] ): """simple docstring""" if self.config_class.is_composition: return A__ = self.config_class() self.parent.assertIsNotNone(_snake_case ) def _a ( self : Optional[Any] ): """simple docstring""" A__ = copy.deepcopy(_snake_case ) A__ = self.config_class(**_snake_case ) A__ = [] for key, value in config_common_kwargs.items(): if key == "torch_dtype": if not is_torch_available(): continue else: import torch if config.torch_dtype != torch.floataa: wrong_values.append(('torch_dtype', config.torch_dtype, torch.floataa) ) elif getattr(_snake_case , _snake_case ) != value: wrong_values.append((key, getattr(_snake_case , _snake_case ), value) ) if len(_snake_case ) > 0: A__ = '\n'.join([F'''- {v[0]}: got {v[1]} instead of {v[2]}''' for v in wrong_values] ) raise ValueError(F'''The following keys were not properly set in the config:\n{errors}''' ) def _a ( self : Dict ): """simple docstring""" self.create_and_test_config_common_properties() self.create_and_test_config_to_json_string() self.create_and_test_config_to_json_file() self.create_and_test_config_from_and_save_pretrained() self.create_and_test_config_from_and_save_pretrained_subfolder() self.create_and_test_config_with_num_labels() self.check_config_can_be_init_without_params() self.check_config_arguments_init()
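# Hedged usage sketch (comment-only: the tester class above is mangled to
# `__lowerCAmelCase` and its public entry point -- the last method, which runs
# every check in sequence -- is upstream called `run_common_tests`):
#
#   class BertConfigTest(unittest.TestCase):
#       def setUp(self):
#           self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
#
#       def test_config(self):
#           self.config_tester.run_common_tests()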
import pytest import datasets # Import fixture modules as plugins SCREAMING_SNAKE_CASE__ = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec'''] def A ( __UpperCamelCase , __UpperCamelCase ) -> Optional[int]: # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit") for item in items: if any(marker in item.keywords for marker in ['integration', 'unit'] ): continue item.add_marker(pytest.mark.unit ) def A ( __UpperCamelCase ) -> str: config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' ) @pytest.fixture(autouse=__UpperCamelCase ) def A ( __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]: # test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work? A__ = tmp_path_factory.getbasetemp() / 'cache' A__ = test_hf_cache_home / 'datasets' A__ = test_hf_cache_home / 'metrics' A__ = test_hf_cache_home / 'modules' monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(__UpperCamelCase ) ) monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(__UpperCamelCase ) ) monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(__UpperCamelCase ) ) A__ = test_hf_datasets_cache / 'downloads' monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(__UpperCamelCase ) ) A__ = test_hf_datasets_cache / 'downloads' / 'extracted' monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(__UpperCamelCase ) ) @pytest.fixture(autouse=__UpperCamelCase , scope='session' ) def A ( ) -> Union[str, Any]: datasets.disable_progress_bar() @pytest.fixture(autouse=__UpperCamelCase ) def A ( __UpperCamelCase ) -> int: # don't take tests into account when counting downloads monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , __UpperCamelCase ) @pytest.fixture def A ( __UpperCamelCase ) -> Any: # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0 # To be removed once SQLAlchemy 2.0 supported monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , __UpperCamelCase )
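# Hedged illustration (hypothetical test module, not part of this conftest):
# the collection hook above auto-marks any test without an `integration`
# marker as `unit`, so the suite splits cleanly with `pytest -m unit` vs.
# `pytest -m integration`:
#
#   import pytest
#
#   def test_fast_path():            # collected by `pytest -m unit`
#       assert 1 + 1 == 2
#
#   @pytest.mark.integration
#   def test_end_to_end():           # excluded by `pytest -m unit`
#       ...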
from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar SCREAMING_SNAKE_CASE__ = TypeVar('''KEY''') SCREAMING_SNAKE_CASE__ = TypeVar('''VAL''') @dataclass(frozen=UpperCAmelCase_ , slots=UpperCAmelCase_ ) class __lowerCAmelCase ( Generic[KEY, VAL] ): """simple docstring""" A__ : KEY A__ : VAL class __lowerCAmelCase ( _Item ): """simple docstring""" def __init__( self : int ): """simple docstring""" super().__init__(_snake_case , _snake_case ) def __bool__( self : List[str] ): """simple docstring""" return False SCREAMING_SNAKE_CASE__ = _DeletedItem() class __lowerCAmelCase ( MutableMapping[KEY, VAL] ): """simple docstring""" def __init__( self : List[Any] , _snake_case : int = 8 , _snake_case : float = 0.75 ): """simple docstring""" A__ = initial_block_size A__ = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 A__ = capacity_factor A__ = 0 def _a ( self : Dict , _snake_case : KEY ): """simple docstring""" return hash(_snake_case ) % len(self._buckets ) def _a ( self : Optional[Any] , _snake_case : int ): """simple docstring""" return (ind + 1) % len(self._buckets ) def _a ( self : Any , _snake_case : int , _snake_case : KEY , _snake_case : VAL ): """simple docstring""" A__ = self._buckets[ind] if not stored: A__ = _Item(_snake_case , _snake_case ) self._len += 1 return True elif stored.key == key: A__ = _Item(_snake_case , _snake_case ) return True else: return False def _a ( self : List[Any] ): """simple docstring""" A__ = len(self._buckets ) * self._capacity_factor return len(self ) >= int(_snake_case ) def _a ( self : Optional[int] ): """simple docstring""" if len(self._buckets ) <= self._initial_block_size: return False A__ = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def _a ( self : List[Any] , _snake_case : int ): """simple docstring""" A__ = self._buckets A__ = [None] * new_size A__ = 0 for item in old_buckets: if item: self._add_item(item.key , item.val ) def _a ( self : Optional[Any] ): """simple docstring""" self._resize(len(self._buckets ) * 2 ) def _a ( self : Dict ): """simple docstring""" self._resize(len(self._buckets ) // 2 ) def _a ( self : str , _snake_case : KEY ): """simple docstring""" A__ = self._get_bucket_index(_snake_case ) for _ in range(len(self._buckets ) ): yield ind A__ = self._get_next_ind(_snake_case ) def _a ( self : Any , _snake_case : KEY , _snake_case : VAL ): """simple docstring""" for ind in self._iterate_buckets(_snake_case ): if self._try_set(_snake_case , _snake_case , _snake_case ): break def __setitem__( self : Union[str, Any] , _snake_case : KEY , _snake_case : VAL ): """simple docstring""" if self._is_full(): self._size_up() self._add_item(_snake_case , _snake_case ) def __delitem__( self : List[str] , _snake_case : KEY ): """simple docstring""" for ind in self._iterate_buckets(_snake_case ): A__ = self._buckets[ind] if item is None: raise KeyError(_snake_case ) if item is _deleted: continue if item.key == key: A__ = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self : List[Any] , _snake_case : KEY ): """simple docstring""" for ind in self._iterate_buckets(_snake_case ): A__ = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(_snake_case ) def __len__( self : Any ): """simple docstring""" return self._len def __iter__( self : List[str] ): """simple docstring""" yield from (item.key for item in self._buckets if item) def 
__repr__( self : Optional[int] ): """simple docstring""" A__ = ' ,'.join( F'''{item.key}: {item.val}''' for item in self._buckets if item ) return F'''HashMap({val_string})'''
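# Hedged usage sketch (comment-only: the names above are mangled -- all three
# classes share one name and the duplicated `_snake_case` parameters would not
# even parse -- so this assumes the un-mangled map type, called `HashMap` here):
#
#   hash_map: HashMap[str, int] = HashMap(initial_block_size=8)
#   for i, key in enumerate("abcdef"):
#       hash_map[key] = i        # __setitem__ resizes once load > 0.75
#   del hash_map["a"]            # the bucket is tombstoned with _deleted
#   assert "a" not in hash_map and len(hash_map) == 5
#   print(hash_map)              # repr lists the surviving key: value pairs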
import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def A ( __UpperCamelCase , __UpperCamelCase ) -> Tuple: A__ = args.log_outputs A__ = '_'.join(args.dataset.split('/' ) + [args.config, args.split] ) # load metric A__ = load_metric('wer' ) A__ = load_metric('cer' ) # compute metrics A__ = wer.compute(references=result['target'] , predictions=result['prediction'] ) A__ = cer.compute(references=result['target'] , predictions=result['prediction'] ) # print & log results A__ = f'''WER: {wer_result}\nCER: {cer_result}''' print(__UpperCamelCase ) with open(f'''{dataset_id}_eval_results.txt''' , 'w' ) as f: f.write(__UpperCamelCase ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: A__ = f'''log_{dataset_id}_predictions.txt''' A__ = f'''log_{dataset_id}_targets.txt''' with open(__UpperCamelCase , 'w' ) as p, open(__UpperCamelCase , 'w' ) as t: # mapping function to write output def write_to_file(__UpperCamelCase , __UpperCamelCase ): p.write(f'''{i}''' + '\n' ) p.write(batch['prediction'] + '\n' ) t.write(f'''{i}''' + '\n' ) t.write(batch['target'] + '\n' ) result.map(__UpperCamelCase , with_indices=__UpperCamelCase ) def A ( __UpperCamelCase ) -> str: A__ = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training A__ = re.sub(__UpperCamelCase , '' , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! A__ = ['\n\n', '\n', ' ', ' '] for t in token_sequences_to_ignore: A__ = ' '.join(text.split(__UpperCamelCase ) ) return text def A ( __UpperCamelCase ) -> Union[str, Any]: # load dataset A__ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=__UpperCamelCase ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor A__ = AutoFeatureExtractor.from_pretrained(args.model_id ) A__ = feature_extractor.sampling_rate # resample audio A__ = dataset.cast_column('audio' , Audio(sampling_rate=__UpperCamelCase ) ) # load eval pipeline if args.device is None: A__ = 0 if torch.cuda.is_available() else -1 A__ = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(__UpperCamelCase ): A__ = asr( batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) A__ = prediction['text'] A__ = normalize_text(batch['sentence'] ) return batch # run inference on all examples A__ = dataset.map(__UpperCamelCase , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(__UpperCamelCase , __UpperCamelCase ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() parser.add_argument( '''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers''' ) parser.add_argument( '''--dataset''', type=str, required=True, help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''', ) parser.add_argument( '''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice''' ) parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. 
*E.g.* `\'test\'`''') parser.add_argument( '''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.''' ) parser.add_argument( '''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.''' ) parser.add_argument( '''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.''' ) parser.add_argument( '''--device''', type=int, default=None, help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''', ) SCREAMING_SNAKE_CASE__ = parser.parse_args() main(args)
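# Hedged invocation sketch (model and dataset identifiers are placeholders;
# the script filename is assumed):
#
#   python eval.py \
#       --model_id some-org/wav2vec2-finetuned-en \
#       --dataset mozilla-foundation/common_voice_8_0 \
#       --config en \
#       --split test \
#       --chunk_length_s 5.0 \
#       --stride_length_s 1.0 \
#       --log_outputs
#
# This writes `<dataset_id>_eval_results.txt` with the WER/CER and, with
# --log_outputs, per-sample prediction/target log files.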
from __future__ import annotations def A ( __UpperCamelCase ) -> list[int]: return [ord(__UpperCamelCase ) - 96 for elem in plain] def A ( __UpperCamelCase ) -> str: return "".join(chr(elem + 96 ) for elem in encoded ) def A ( ) -> None: A__ = encode(input('-> ' ).strip().lower() ) print('Encoded: ' , __UpperCamelCase ) print('Decoded:' , decode(__UpperCamelCase ) ) if __name__ == "__main__": main()
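# Hedged round-trip sketch (comment-only: the three functions above are all
# mangled to `A`, so the `main()` call would fail as written; shown with their
# presumed original names encode/decode):
#
#   encode("hello")             # -> [8, 5, 12, 12, 15]   (a=1 ... z=26)
#   decode([8, 5, 12, 12, 15])  # -> 'hello'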
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) def A ( __UpperCamelCase ) -> YolosConfig: A__ = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: A__ = 192 A__ = 768 A__ = 12 A__ = 3 A__ = [800, 1_333] A__ = False elif yolos_name == "yolos_s_dWr": A__ = 330 A__ = 14 A__ = 6 A__ = 1_320 elif "yolos_s" in yolos_name: A__ = 384 A__ = 1_536 A__ = 12 A__ = 6 elif "yolos_b" in yolos_name: A__ = [800, 1_344] A__ = 91 A__ = 'huggingface/label-files' A__ = 'coco-detection-id2label.json' A__ = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='dataset' ) , 'r' ) ) A__ = {int(__UpperCamelCase ): v for k, v in idalabel.items()} A__ = idalabel A__ = {v: k for k, v in idalabel.items()} return config def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False ) -> str: for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) A__ = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) A__ = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict A__ = in_proj_weight[: config.hidden_size, :] A__ = in_proj_bias[: config.hidden_size] A__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] A__ = in_proj_weight[-config.hidden_size :, :] A__ = in_proj_bias[-config.hidden_size :] def A ( __UpperCamelCase ) -> str: if "backbone" in name: A__ = name.replace('backbone' , 'vit' ) if "cls_token" in name: A__ = name.replace('cls_token' , 'embeddings.cls_token' ) if "det_token" in name: A__ = name.replace('det_token' , 'embeddings.detection_tokens' ) if "mid_pos_embed" in name: A__ = name.replace('mid_pos_embed' , 'encoder.mid_position_embeddings' ) if "pos_embed" in name: A__ = name.replace('pos_embed' , 'embeddings.position_embeddings' ) if "patch_embed.proj" in name: A__ = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "blocks" in name: A__ = name.replace('blocks' , 'encoder.layer' ) if "attn.proj" in name: A__ = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: A__ = name.replace('attn' , 'attention.self' ) if "norm1" in name: A__ = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: A__ = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: A__ = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: A__ = name.replace('mlp.fc2' , 'output.dense' ) if "class_embed" in name: A__ = name.replace('class_embed' , 'class_labels_classifier' ) if "bbox_embed" in name: A__ = name.replace('bbox_embed' , 'bbox_predictor' ) if "vit.norm" in name: A__ = name.replace('vit.norm' , 'vit.layernorm' ) return name def A ( __UpperCamelCase , __UpperCamelCase ) -> dict: for key in orig_state_dict.copy().keys(): A__ = orig_state_dict.pop(__UpperCamelCase ) if "qkv" in key: A__ = key.split('.' 
) A__ = int(key_split[2] ) A__ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: A__ = val[:dim, :] A__ = val[ dim : dim * 2, : ] A__ = val[-dim:, :] else: A__ = val[:dim] A__ = val[dim : dim * 2] A__ = val[-dim:] else: A__ = val return orig_state_dict def A ( ) -> torch.Tensor: A__ = 'http://images.cocodataset.org/val2017/000000039769.jpg' A__ = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw ) return im @torch.no_grad() def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False ) -> List[str]: A__ = get_yolos_config(__UpperCamelCase ) # load original state_dict A__ = torch.load(__UpperCamelCase , map_location='cpu' )['model'] # load 🤗 model A__ = YolosForObjectDetection(__UpperCamelCase ) model.eval() A__ = convert_state_dict(__UpperCamelCase , __UpperCamelCase ) model.load_state_dict(__UpperCamelCase ) # Check outputs on an image, prepared by YolosImageProcessor A__ = 800 if yolos_name != 'yolos_ti' else 512 A__ = YolosImageProcessor(format='coco_detection' , size=__UpperCamelCase ) A__ = image_processor(images=prepare_img() , return_tensors='pt' ) A__ = model(**__UpperCamelCase ) A__ , A__ = outputs.logits, outputs.pred_boxes A__ , A__ = None, None if yolos_name == "yolos_ti": A__ = torch.tensor( [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] ) A__ = torch.tensor( [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] ) elif yolos_name == "yolos_s_200_pre": A__ = torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] ) A__ = torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] ) elif yolos_name == "yolos_s_300_pre": A__ = torch.tensor( [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] ) A__ = torch.tensor( [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] ) elif yolos_name == "yolos_s_dWr": A__ = torch.tensor( [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] ) A__ = torch.tensor( [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] ) elif yolos_name == "yolos_base": A__ = torch.tensor( [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] ) A__ = torch.tensor( [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] ) else: raise ValueError(f'''Unknown yolos_name: {yolos_name}''' ) assert torch.allclose(logits[0, :3, :3] , __UpperCamelCase , atol=1E-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , __UpperCamelCase , atol=1E-4 ) Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase ) print(f'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__UpperCamelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__UpperCamelCase ) if push_to_hub: A__ = { 'yolos_ti': 'yolos-tiny', 'yolos_s_200_pre': 'yolos-small', 'yolos_s_300_pre': 'yolos-small-300', 'yolos_s_dWr': 'yolos-small-dwr', 'yolos_base': 'yolos-base', } print('Pushing to the hub...' 
) A__ = model_mapping[yolos_name] image_processor.push_to_hub(__UpperCamelCase , organization='hustvl' ) model.push_to_hub(__UpperCamelCase , organization='hustvl' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--yolos_name''', default='''yolos_s_200_pre''', type=str, help=( '''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\',''' ''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.''' ), ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) SCREAMING_SNAKE_CASE__ = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
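# Hedged invocation sketch (paths are placeholders; the script filename is
# assumed):
#
#   python convert_yolos_to_pytorch.py \
#       --yolos_name yolos_s_200_pre \
#       --checkpoint_path /path/to/yolos_s_200_pre.pth \
#       --pytorch_dump_folder_path ./yolos-small \
#       --push_to_hub
#
# The conversion renames timm-style keys, splits the fused qkv projection into
# separate query/key/value weights, and verifies logits and boxes on a COCO
# sample before saving.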
from random import randint, random def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = 5 , ) -> list: A__ = [[-1] * number_of_cells] # Create a highway without any car A__ = 0 A__ = max(__UpperCamelCase , 0 ) while i < number_of_cells: A__ = ( randint(0 , __UpperCamelCase ) if random_speed else initial_speed ) # Place the cars i += ( randint(1 , max_speed * 2 ) if random_frequency else frequency ) # Arbitrary number, may need tuning return highway def A ( __UpperCamelCase , __UpperCamelCase ) -> int: A__ = 0 A__ = highway_now[car_index + 1 :] for cell in range(len(__UpperCamelCase ) ): # May need a better name for this if cells[cell] != -1: # If the cell is not empty then return distance # we have the distance we wanted distance += 1 # Here if the car is near the end of the highway return distance + get_distance(__UpperCamelCase , -1 ) def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> list: A__ = len(__UpperCamelCase ) # Beforce calculations, the highway is empty A__ = [-1] * number_of_cells for car_index in range(__UpperCamelCase ): if highway_now[car_index] != -1: # Add 1 to the current speed of the car and cap the speed A__ = min(highway_now[car_index] + 1 , __UpperCamelCase ) # Number of empty cell before the next car A__ = get_distance(__UpperCamelCase , __UpperCamelCase ) - 1 # We can't have the car causing an accident A__ = min(next_highway[car_index] , __UpperCamelCase ) if random() < probability: # Randomly, a driver will slow down A__ = max(next_highway[car_index] - 1 , 0 ) return next_highway def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> list: A__ = len(highway[0] ) for i in range(__UpperCamelCase ): A__ = update(highway[i] , __UpperCamelCase , __UpperCamelCase ) A__ = [-1] * number_of_cells for car_index in range(__UpperCamelCase ): A__ = next_speeds_calculated[car_index] if speed != -1: # Change the position based on the speed (with % to create the loop) A__ = (car_index + speed) % number_of_cells # Commit the change of position A__ = speed highway.append(__UpperCamelCase ) return highway if __name__ == "__main__": import doctest doctest.testmod()
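# Hedged usage sketch (comment-only: every function above is mangled to `A`,
# so this uses the presumed original names construct_highway/simulate). It
# runs the Nagel-Schreckenberg update on a 100-cell ring road:
#
#   highway = construct_highway(
#       number_of_cells=100, frequency=5, initial_speed=2,
#       random_speed=True, max_speed=5,
#   )
#   history = simulate(highway, number_of_update=10, probability=0.1, max_speed=5)
#   print(history[-1])   # final lane state: -1 = empty cell, otherwise the car's speed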
from typing import TYPE_CHECKING from ..utils import _LazyModule SCREAMING_SNAKE_CASE__ = { '''config''': [ '''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''', '''OnnxConfig''', '''OnnxConfigWithPast''', '''OnnxSeq2SeqConfigWithPast''', '''PatchingSpec''', ], '''convert''': ['''export''', '''validate_model_outputs'''], '''features''': ['''FeaturesManager'''], '''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''], } if TYPE_CHECKING: from .config import ( EXTERNAL_DATA_FORMAT_SIZE_LIMIT, OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast, PatchingSpec, ) from .convert import export, validate_model_outputs from .features import FeaturesManager from .utils import ParameterFormat, compute_serialized_parameters_size else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
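# Hedged illustration (comment-only; `SCREAMING_SNAKE_CASE__` above is the
# mangled name of the lazy module that replaces this package in sys.modules):
# with `_LazyModule`, importing the package is cheap and each submodule loads
# on first attribute access:
#
#   from transformers import onnx
#   onnx.OnnxConfig   # first access imports `onnx.config` under the hood
#   onnx.export       # first access imports `onnx.convert`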
from typing import Dict, List, Optional, Tuple, Union import torch from ...models import AutoencoderKL, TransformeraDModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Dict , _snake_case : TransformeraDModel , _snake_case : AutoencoderKL , _snake_case : KarrasDiffusionSchedulers , _snake_case : Optional[Dict[int, str]] = None , ): """simple docstring""" super().__init__() self.register_modules(transformer=_snake_case , vae=_snake_case , scheduler=_snake_case ) # create a imagenet -> id dictionary for easier use A__ = {} if idalabel is not None: for key, value in idalabel.items(): for label in value.split(',' ): A__ = int(_snake_case ) A__ = dict(sorted(self.labels.items() ) ) def _a ( self : int , _snake_case : Union[str, List[str]] ): """simple docstring""" if not isinstance(_snake_case , _snake_case ): A__ = list(_snake_case ) for l in label: if l not in self.labels: raise ValueError( F'''{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.''' ) return [self.labels[l] for l in label] @torch.no_grad() def __call__( self : List[Any] , _snake_case : List[int] , _snake_case : float = 4.0 , _snake_case : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _snake_case : int = 50 , _snake_case : Optional[str] = "pil" , _snake_case : bool = True , ): """simple docstring""" A__ = len(_snake_case ) A__ = self.transformer.config.sample_size A__ = self.transformer.config.in_channels A__ = randn_tensor( shape=(batch_size, latent_channels, latent_size, latent_size) , generator=_snake_case , device=self.device , dtype=self.transformer.dtype , ) A__ = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents A__ = torch.tensor(_snake_case , device=self.device ).reshape(-1 ) A__ = torch.tensor([10_00] * batch_size , device=self.device ) A__ = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels # set step values self.scheduler.set_timesteps(_snake_case ) for t in self.progress_bar(self.scheduler.timesteps ): if guidance_scale > 1: A__ = latent_model_input[: len(_snake_case ) // 2] A__ = torch.cat([half, half] , dim=0 ) A__ = self.scheduler.scale_model_input(_snake_case , _snake_case ) A__ = t if not torch.is_tensor(_snake_case ): # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) A__ = latent_model_input.device.type == 'mps' if isinstance(_snake_case , _snake_case ): A__ = torch.floataa if is_mps else torch.floataa else: A__ = torch.intaa if is_mps else torch.intaa A__ = torch.tensor([timesteps] , dtype=_snake_case , device=latent_model_input.device ) elif len(timesteps.shape ) == 0: A__ = timesteps[None].to(latent_model_input.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML A__ = timesteps.expand(latent_model_input.shape[0] ) # predict noise model_output A__ = self.transformer( _snake_case , timestep=_snake_case , class_labels=_snake_case ).sample # perform guidance if guidance_scale > 1: A__ , A__ = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:] A__ , A__ = torch.split(_snake_case , len(_snake_case ) // 2 , dim=0 ) A__ = uncond_eps + guidance_scale * (cond_eps - uncond_eps) A__ = torch.cat([half_eps, half_eps] , dim=0 ) A__ = torch.cat([eps, rest] , dim=1 ) # learned sigma if self.transformer.config.out_channels // 2 == latent_channels: A__ , A__ = torch.split(_snake_case , _snake_case , dim=1 ) else: A__ = noise_pred # compute previous image: x_t -> x_t-1 A__ = self.scheduler.step(_snake_case , _snake_case , _snake_case ).prev_sample if guidance_scale > 1: A__ , A__ = latent_model_input.chunk(2 , dim=0 ) else: A__ = latent_model_input A__ = 1 / self.vae.config.scaling_factor * latents A__ = self.vae.decode(_snake_case ).sample A__ = (samples / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 A__ = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": A__ = self.numpy_to_pil(_snake_case ) if not return_dict: return (samples,) return ImagePipelineOutput(images=_snake_case )
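# Hedged usage sketch (comment-only: the pipeline class is mangled above; its
# upstream name is `DiTPipeline`, and the label-lookup method mangled to `_a`
# is `get_label_ids`):
#
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
#   class_ids = pipe.get_label_ids(["white shark", "umbrella"])
#   images = pipe(class_labels=class_ids, guidance_scale=4.0,
#                 num_inference_steps=25).images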
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''} SCREAMING_SNAKE_CASE__ = { '''vocab_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''', }, '''tokenizer_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''', }, } SCREAMING_SNAKE_CASE__ = { '''google/rembert''': 2_5_6, } SCREAMING_SNAKE_CASE__ = '''▁''' class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" A__ : Any = VOCAB_FILES_NAMES A__ : str = PRETRAINED_VOCAB_FILES_MAP A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ : int = RemBertTokenizer def __init__( self : Union[str, Any] , _snake_case : Any=None , _snake_case : Optional[Any]=None , _snake_case : Any=True , _snake_case : Optional[int]=True , _snake_case : Dict=False , _snake_case : Dict="[CLS]" , _snake_case : List[Any]="[SEP]" , _snake_case : Union[str, Any]="<unk>" , _snake_case : List[str]="[SEP]" , _snake_case : List[str]="<pad>" , _snake_case : str="[CLS]" , _snake_case : Any="[MASK]" , **_snake_case : Any , ): """simple docstring""" A__ = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else mask_token super().__init__( _snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , remove_space=_snake_case , keep_accents=_snake_case , bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , **_snake_case , ) A__ = do_lower_case A__ = remove_space A__ = keep_accents A__ = vocab_file A__ = False if not self.vocab_file else True def _a ( self : Any , _snake_case : List[int] , _snake_case : Optional[List[int]] = None ): """simple docstring""" A__ = [self.sep_token_id] A__ = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _a ( self : Tuple , _snake_case : List[int] , _snake_case : Optional[List[int]] = None , _snake_case : bool = False ): """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.' 
) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(_snake_case )) + [1] + ([0] * len(_snake_case )) + [1] return [1] + ([0] * len(_snake_case )) + [1] def _a ( self : Dict , _snake_case : List[int] , _snake_case : Optional[List[int]] = None ): """simple docstring""" A__ = [self.sep_token_id] A__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _a ( self : Any , _snake_case : str , _snake_case : Optional[str] = None ): """simple docstring""" if not os.path.isdir(_snake_case ): logger.error('Vocabulary path ({}) should be a directory'.format(_snake_case ) ) return A__ = os.path.join( _snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ): copyfile(self.vocab_file , _snake_case ) return (out_vocab_file,)
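# Hedged usage sketch (comment-only; the class above is mangled, upstream it is
# `RemBertTokenizerFast`, and the checkpoint name follows the pretrained map):
# build_inputs_with_special_tokens implements the template [CLS] A [SEP] for a
# single sequence and [CLS] A [SEP] B [SEP] for a pair:
#
#   tok = RemBertTokenizerFast.from_pretrained("google/rembert")
#   tok("first text", "second text")["input_ids"]
#   # -> [cls_id, ...A..., sep_id, ...B..., sep_id]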
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import YolosImageProcessor


class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the height and width the image processor is expected to produce."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
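# A minimal, self-contained sketch of the shortest-edge resize rule that
# `get_expected_values` above replicates; the helper name is ours, for illustration only:
def expected_resize(w: int, h: int, shortest_edge: int = 18) -> tuple[int, int]:
    # Scale so the shorter side lands on `shortest_edge`, preserving aspect ratio.
    if w < h:
        return int(shortest_edge * h / w), shortest_edge  # (height, width)
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge


assert expected_resize(30, 60) == (36, 18)
assert expected_resize(60, 30) == (18, 36)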
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch


TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
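# A sketch of the "alternating layers" selection the test above exercises; this
# is our illustration of the idea, not the make_student implementation itself:
def pick_layers_to_copy(n_student: int, n_teacher: int) -> list[int]:
    # Spread the student's layers evenly across the teacher's stack.
    step = n_teacher / n_student
    return [round(i * step) for i in range(n_student)]


assert pick_layers_to_copy(1, 12) == [0]
assert pick_layers_to_copy(3, 12) == [0, 4, 8]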
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union

from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream


if TYPE_CHECKING:
    import sqlite3

    import sqlalchemy


class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows_batch in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows_batch

        return written
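# Hedged usage sketch: a round-trip through the public wrappers that delegate to
# SqlDatasetReader / SqlDatasetWriter above (wrapper names assumed from the `datasets` API):
import sqlite3

from datasets import Dataset

ds = Dataset.from_dict({"id": [1, 2], "text": ["a", "b"]})
con = sqlite3.connect(":memory:")
ds.to_sql("my_table", con)  # goes through SqlDatasetWriter
ds2 = Dataset.from_sql("SELECT * FROM my_table", con)  # goes through SqlDatasetReader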
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
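# Hedged usage sketch for the processor above (checkpoint name and image size assumed):
from PIL import Image

from transformers import BridgeTowerProcessor

processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
image = Image.new("RGB", (384, 384))  # stand-in for a real photo
inputs = processor(images=image, text="a cat on a couch", return_tensors="pt")
# input_ids / attention_mask come from the tokenizer; pixel_values from the image processor.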
import math


class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning vector by Euclidean distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Update the winning vector."""
        for i in range(len(weights)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
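# Worked check of the distance accumulation inside get_winner, using the values from main():
sample = [1, 1, 0, 0]
weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
d0 = sum((s - w) ** 2 for s, w in zip(sample, weights[0]))  # 0.64 + 0.16 + 0.25 + 0.81 = 1.86
d1 = sum((s - w) ** 2 for s, w in zip(sample, weights[1]))  # 0.04 + 0.36 + 0.49 + 0.09 = 0.98
# the sample is nearer to weights[1]; get_winner encodes its choice as `0 if d0 > d1 else 1`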
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_xlm_roberta": [
        "XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaConfig",
        "XLMRobertaOnnxConfig",
    ],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm_roberta"] = [
        "XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMRobertaForCausalLM",
        "XLMRobertaForMaskedLM",
        "XLMRobertaForMultipleChoice",
        "XLMRobertaForQuestionAnswering",
        "XLMRobertaForSequenceClassification",
        "XLMRobertaForTokenClassification",
        "XLMRobertaModel",
        "XLMRobertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
        "TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMRobertaForCausalLM",
        "TFXLMRobertaForMaskedLM",
        "TFXLMRobertaForMultipleChoice",
        "TFXLMRobertaForQuestionAnswering",
        "TFXLMRobertaForSequenceClassification",
        "TFXLMRobertaForTokenClassification",
        "TFXLMRobertaModel",
        "TFXLMRobertaPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
        "FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxXLMRobertaForMaskedLM",
        "FlaxXLMRobertaForCausalLM",
        "FlaxXLMRobertaForMultipleChoice",
        "FlaxXLMRobertaForQuestionAnswering",
        "FlaxXLMRobertaForSequenceClassification",
        "FlaxXLMRobertaForTokenClassification",
        "FlaxXLMRobertaModel",
        "FlaxXLMRobertaPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_xlm_roberta import (
        XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaConfig,
        XLMRobertaOnnxConfig,
    )

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlm_roberta import XLMRobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta import (
            XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaForCausalLM,
            XLMRobertaForMaskedLM,
            XLMRobertaForMultipleChoice,
            XLMRobertaForQuestionAnswering,
            XLMRobertaForSequenceClassification,
            XLMRobertaForTokenClassification,
            XLMRobertaModel,
            XLMRobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm_roberta import (
            TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMRobertaForCausalLM,
            TFXLMRobertaForMaskedLM,
            TFXLMRobertaForMultipleChoice,
            TFXLMRobertaForQuestionAnswering,
            TFXLMRobertaForSequenceClassification,
            TFXLMRobertaForTokenClassification,
            TFXLMRobertaModel,
            TFXLMRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xlm_roberta import (
            FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxXLMRobertaForCausalLM,
            FlaxXLMRobertaForMaskedLM,
            FlaxXLMRobertaForMultipleChoice,
            FlaxXLMRobertaForQuestionAnswering,
            FlaxXLMRobertaForSequenceClassification,
            FlaxXLMRobertaForTokenClassification,
            FlaxXLMRobertaModel,
            FlaxXLMRobertaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
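# The init above defers heavy imports until an attribute is first accessed.
# A minimal sketch of that idea (ours, not the transformers _LazyModule implementation):
import importlib
import types


class LazySubmodules(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f".{module_name}", self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value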
import argparse
import math
import os

import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c",
        "--caption",
        type=str,
        default="robotic cat with wings",
        help="Text used to generate images.",
    )
    parser.add_argument(
        "-n",
        "--images_num",
        type=int,
        default=4,
        help="How many images to generate.",
    )
    parser.add_argument(
        "-s",
        "--seed",
        type=int,
        default=42,
        help="Seed for random process.",
    )
    parser.add_argument(
        "-ci",
        "--cuda_id",
        type=int,
        default=0,
        help="cuda_id.",
    )
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(
    pipeline,
    prompt="robotic cat with wings",
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images


args = parse_args()

# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")

pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)

if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
    pipeline = pipeline.to(unet.device)

grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
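# Worked check of the grid geometry used by generate_images above:
import math

for n in (4, 6, 9):
    rows = int(math.sqrt(n))
    cols = n // rows
    print(f"{n} images -> {rows} x {cols} grid")  # 2 x 2, 2 x 3, 3 x 3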
import argparse
import ast
import logging
import os
import sys

import pandas as pd
import torch
from tqdm import tqdm

from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging


sys.path.append(os.path.join(os.getcwd()))  # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip


logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()


def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)


def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")


def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")


def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings


def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token", "bart"],
        type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name",
        default=None,
        choices=["exact", "compressed", "legacy"],
        type=str,
        help="RAG model retriever type",
    )
    parser.add_argument(
        "--index_path",
        default=None,
        type=str,
        help="Path to the retrieval index",
    )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode",
        choices=["e2e", "retrieval"],
        default="e2e",
        type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set",
        default=None,
        type=str,
        required=True,
        help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path",
        default=None,
        type=str,
        required=True,
        help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode",
        default="qa",
        type=str,
        choices=["qa", "ans"],
        help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path",
        type=str,
        default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument(
        "--eval_batch_size",
        default=8,
        type=int,
        help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--recalculate",
        help="Recalculate predictions even if the prediction file exists",
        action="store_true",
    )
    parser.add_argument(
        "--num_beams",
        default=4,
        type=int,
        help="Number of beams to be used when generating answers",
    )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions",
        action="store_true",
        help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs",
        action="store_true",
        help="If True, prints docs retrieved while generating.",
    )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args


def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)


if __name__ == "__main__":
    args = get_args()
    main(args)
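# metric_max_over_ground_truths keeps the best score across references.
# Toy stand-in for utils_rag.exact_match_score (ours, for illustration only):
def toy_exact_match(prediction: str, ground_truth: str) -> float:
    return float(prediction.strip().lower() == ground_truth.strip().lower())


best = max(toy_exact_match("Paris", gt) for gt in ["paris", "City of Light"])
assert best == 1.0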
import argparse
import logging
import os
import sys

import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers

import transformers
from transformers import BartForConditionalGeneration, BartTokenizer


logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=os.environ.get("LOGLEVEL", "INFO").upper(),
    stream=sys.stdout,
)

logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}


def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args


def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer


def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")


def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams

    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)


if __name__ == "__main__":
    main()
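# A minimal sketch of the torch-vs-onnxruntime parity check used above, applied
# to a toy model (all names here are ours, for illustration only):
import numpy as np
import onnxruntime
import torch

toy_model = torch.nn.Linear(4, 2).eval()
dummy = torch.randn(1, 4)
torch.onnx.export(
    toy_model,
    (dummy,),
    "toy.onnx",
    input_names=["x"],
    output_names=["y"],
    dynamic_axes={"x": {0: "batch"}, "y": {0: "batch"}},
)

sess = onnxruntime.InferenceSession("toy.onnx")
ort_y = sess.run(None, {"x": dummy.numpy()})[0]
np.testing.assert_allclose(toy_model(dummy).detach().numpy(), ort_y, rtol=1e-3, atol=1e-5)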
import inspect
import unittest

from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
    from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image


class ViTHybridModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape

        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")

        image = prepare_img()

        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()

        self.assertTrue(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
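# Worked check of the sequence-length rule in the tester above: a 64x64 input
# through a stride-32 backbone gives a 2x2 feature map, i.e. 4 patches + 1 [CLS] token.
image_size = 64
num_patches = (image_size // 32) ** 2
assert num_patches + 1 == 5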
from __future__ import annotations

import time

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)

        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)

            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Returns a list of successors (both in the grid and free spaces)."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Retrace the path from parents to parents until start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print("Bidirectional BFS computation time : ", bd_bfs_time)
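# Why meeting in the middle helps: with branching factor b and solution depth d,
# two half-depth frontiers are far smaller than one full-depth frontier.
b, d = 4, 8
print(b**d)              # 65536 nodes explored by plain BFS in the worst case
print(2 * b ** (d // 2)) # 512 nodes for the two bidirectional frontiers combined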
def is_even(number: int) -> bool:
    """Return True if the input integer is even, using the low bit as the parity flag."""
    return number & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
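# The trailing bit encodes parity: 6 = 0b110 ends in 0, 7 = 0b111 ends in 1.
assert is_even(6) and not is_even(7)
assert is_even(-2)  # Python's arbitrary-precision ints make this safe for negatives too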
import json
import logging
import os
import sys
from pathlib import Path

import finetune_rag

from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
    TestCasePlus,
    execute_subprocess_async,
    require_ray,
    require_torch_gpu,
    require_torch_multi_gpu,
)


logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}

        # Create dummy data files in the source/target line format the trainer expects
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
            --data_dir {data_dir} \
            --output_dir {output_dir} \
            --model_name_or_path facebook/rag-sequence-base \
            --model_type rag_sequence \
            --do_train \
            --do_predict \
            --n_val -1 \
            --val_check_interval 1.0 \
            --train_batch_size 2 \
            --eval_batch_size 1 \
            --max_source_length 25 \
            --max_target_length 25 \
            --val_max_target_length 25 \
            --test_max_target_length 25 \
            --label_smoothing 0.1 \
            --dropout 0.1 \
            --attention_dropout 0.1 \
            --weight_decay 0.001 \
            --adam_epsilon 1e-08 \
            --max_grad_norm 0.1 \
            --lr_scheduler polynomial \
            --learning_rate 3e-04 \
            --num_train_epochs 1 \
            --warmup_steps 4 \
            --gradient_accumulation_steps 1 \
            --distributed-port 8787 \
            --use_dummy_dataset 1 \
            --distributed_retriever {distributed_retriever} \
        """.split()

        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result

    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
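# Small helper (ours, for illustration) to eyeball the dummy corpus that
# _create_dummy_data writes: each split gets a .source and a .target file with
# one example per line, so sources and targets can be zipped line by line.
def peek_dummy_data(data_dir: str) -> None:
    for path in sorted(Path(data_dir).iterdir()):
        lines = path.read_text().splitlines()
        print(path.name, len(lines), repr(lines[0]))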
from typing import Dict

from .base import GenericTensor, Pipeline


class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
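# Hedged usage sketch via the pipeline factory (model name assumed):
from transformers import pipeline

extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
features = extractor("Hello world")
print(len(features[0]), len(features[0][0]))  # tokens x hidden size, e.g. 4 x 768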
52
0
import numpy as np import torch from torch.utils.data import Dataset from utils import logger class __lowerCAmelCase ( UpperCAmelCase_ ): def __init__( self : str , _snake_case : int , _snake_case : List[Any] ): """simple docstring""" A__ = params A__ = np.array(_snake_case ) A__ = np.array([len(_snake_case ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self : List[Any] , _snake_case : Tuple ): """simple docstring""" return (self.token_ids[index], self.lengths[index]) def __len__( self : Optional[Any] ): """simple docstring""" return len(self.lengths ) def _a ( self : str ): """simple docstring""" assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def _a ( self : List[str] ): """simple docstring""" A__ = self.params.max_model_input_size A__ = self.lengths > max_len logger.info(F'''Splitting {sum(_snake_case )} too long sequences.''' ) def divide_chunks(_snake_case : int , _snake_case : Optional[int] ): return [l[i : i + n] for i in range(0 , len(_snake_case ) , _snake_case )] A__ = [] A__ = [] if self.params.mlm: A__ , A__ = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token'] else: A__ , A__ = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token'] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: A__ = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: A__ = np.insert(_snake_case , 0 , _snake_case ) if sub_s[-1] != sep_id: A__ = np.insert(_snake_case , len(_snake_case ) , _snake_case ) assert len(_snake_case ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(_snake_case ) new_tok_ids.extend(_snake_case ) new_lengths.extend([len(_snake_case ) for l in sub_seqs] ) A__ = np.array(_snake_case ) A__ = np.array(_snake_case ) def _a ( self : Union[str, Any] ): """simple docstring""" A__ = len(self ) A__ = self.lengths > 11 A__ = self.token_ids[indices] A__ = self.lengths[indices] A__ = len(self ) logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' ) def _a ( self : Dict ): """simple docstring""" if "unk_token" not in self.params.special_tok_ids: return else: A__ = self.params.special_tok_ids['unk_token'] A__ = len(self ) A__ = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) A__ = (unk_occs / self.lengths) < 0.5 A__ = self.token_ids[indices] A__ = self.lengths[indices] A__ = len(self ) logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' ) def _a ( self : Tuple ): """simple docstring""" if not self.params.is_master: return logger.info(F'''{len(self )} sequences''' ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def _a ( self : int , _snake_case : Optional[Any] ): """simple docstring""" A__ = [t[0] for t in batch] A__ = [t[1] for t in batch] assert len(_snake_case ) == len(_snake_case ) # Max 
for paddings A__ = max(_snake_case ) # Pad token ids if self.params.mlm: A__ = self.params.special_tok_ids['pad_token'] else: A__ = self.params.special_tok_ids['unk_token'] A__ = [list(t.astype(_snake_case ) ) + [pad_idx] * (max_seq_len_ - len(_snake_case )) for t in token_ids] assert len(tk_ ) == len(_snake_case ) assert all(len(_snake_case ) == max_seq_len_ for t in tk_ ) A__ = torch.tensor(tk_ ) # (bs, max_seq_len_) A__ = torch.tensor(_snake_case ) # (bs) return tk_t, lg_t
702
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__) def A ( __UpperCamelCase , __UpperCamelCase ) -> List[Any]: return (preds == labels).mean() @dataclass class __lowerCAmelCase : """simple docstring""" A__ : str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) A__ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) A__ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) A__ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) @dataclass class __lowerCAmelCase : """simple docstring""" A__ : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} ) A__ : str = field(metadata={"help": "Should contain the data files for the task."} ) A__ : int = field( default=1_28 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) A__ : bool = field( default=UpperCAmelCase_ , metadata={"help": "Overwrite the cached training and evaluation sets"} ) def A ( ) -> Any: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) A__ , A__ , A__ = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' ' --overwrite_output_dir to overcome.' 
) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , __UpperCamelCase ) # Set seed set_seed(training_args.seed ) try: A__ = processors[data_args.task_name]() A__ = processor.get_labels() A__ = len(__UpperCamelCase ) except KeyError: raise ValueError('Task not found: %s' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. A__ = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__UpperCamelCase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) A__ = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) A__ = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__UpperCamelCase , cache_dir=model_args.cache_dir , ) # Get datasets A__ = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=__UpperCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) A__ = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=__UpperCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(__UpperCamelCase ) -> Dict: A__ = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(__UpperCamelCase , p.label_ids )} # Data collator A__ = DataCollatorWithPadding(__UpperCamelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer A__ = Trainer( model=__UpperCamelCase , args=__UpperCamelCase , train_dataset=__UpperCamelCase , eval_dataset=__UpperCamelCase , compute_metrics=__UpperCamelCase , data_collator=__UpperCamelCase , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation A__ = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) A__ = trainer.evaluate() A__ = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_master(): with open(__UpperCamelCase , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): 
logger.info(' %s = %s' , __UpperCamelCase , __UpperCamelCase ) writer.write('%s = %s\n' % (key, value) ) results.update(__UpperCamelCase ) return results def A ( __UpperCamelCase ) -> List[Any]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
52
0
from __future__ import annotations


def A(electron_conc, hole_conc, intrinsic_conc) -> tuple:
    # parameter names restored from the body references; the original duplicated
    # placeholder names were a SyntaxError
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError('You cannot supply more or less than 2 values')
    elif electron_conc < 0:
        raise ValueError('Electron concentration cannot be negative in a semiconductor')
    elif hole_conc < 0:
        raise ValueError('Hole concentration cannot be negative in a semiconductor')
    elif intrinsic_conc < 0:
        raise ValueError('Intrinsic concentration cannot be negative in a semiconductor')
    elif electron_conc == 0:
        return ("electron_conc", intrinsic_conc**2 / hole_conc)
    elif hole_conc == 0:
        return ("hole_conc", intrinsic_conc**2 / electron_conc)
    elif intrinsic_conc == 0:
        return ("intrinsic_conc", (electron_conc * hole_conc) ** 0.5)
    else:
        return (-1, -1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
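# Added illustrative usage (sketch; the numbers are hypothetical): exactly one of the
# three quantities must be 0, and the function returns the name and value of that
# missing quantity via the mass-action relation n * p == n_i**2.
assert A(electron_conc=25, hole_conc=100, intrinsic_conc=0) == ("intrinsic_conc", 50.0)
assert A(electron_conc=0, hole_conc=1600, intrinsic_conc=200) == ("electron_conc", 25.0)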
703
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available SCREAMING_SNAKE_CASE__ = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ '''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MraForMaskedLM''', '''MraForMultipleChoice''', '''MraForQuestionAnswering''', '''MraForSequenceClassification''', '''MraForTokenClassification''', '''MraLayer''', '''MraModel''', '''MraPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mra import ( MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraLayer, MraModel, MraPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
52
0
from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" A__ : Union[str, Any] = ["image_processor", "tokenizer"] A__ : Optional[Any] = "BridgeTowerImageProcessor" A__ : List[Any] = ("RobertaTokenizer", "RobertaTokenizerFast") def __init__( self : List[Any] , _snake_case : Optional[Any] , _snake_case : Optional[int] ): """simple docstring""" super().__init__(_snake_case , _snake_case ) def __call__( self : List[Any] , _snake_case : int , _snake_case : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _snake_case : bool = True , _snake_case : Union[bool, str, PaddingStrategy] = False , _snake_case : Union[bool, str, TruncationStrategy] = None , _snake_case : Optional[int] = None , _snake_case : int = 0 , _snake_case : Optional[int] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[bool] = None , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = True , _snake_case : Optional[Union[str, TensorType]] = None , **_snake_case : Optional[int] , ): """simple docstring""" A__ = self.tokenizer( text=_snake_case , add_special_tokens=_snake_case , padding=_snake_case , truncation=_snake_case , max_length=_snake_case , stride=_snake_case , pad_to_multiple_of=_snake_case , return_token_type_ids=_snake_case , return_attention_mask=_snake_case , return_overflowing_tokens=_snake_case , return_special_tokens_mask=_snake_case , return_offsets_mapping=_snake_case , return_length=_snake_case , verbose=_snake_case , return_tensors=_snake_case , **_snake_case , ) # add pixel_values + pixel_mask A__ = self.image_processor( _snake_case , return_tensors=_snake_case , do_normalize=_snake_case , do_center_crop=_snake_case , **_snake_case ) encoding.update(_snake_case ) return encoding def _a ( self : Any , *_snake_case : Tuple , **_snake_case : List[Any] ): """simple docstring""" return self.tokenizer.batch_decode(*_snake_case , **_snake_case ) def _a ( self : Dict , *_snake_case : Dict , **_snake_case : List[str] ): """simple docstring""" return self.tokenizer.decode(*_snake_case , **_snake_case ) @property def _a ( self : Tuple ): """simple docstring""" A__ = self.tokenizer.model_input_names A__ = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
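# Added illustrative usage (sketch, not from the source; the checkpoint id is an
# assumption): the processor bundles the image processor and tokenizer into one call.
from PIL import Image
from transformers import BridgeTowerProcessor

processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
dummy_image = Image.new("RGB", (224, 224))
inputs = processor(dummy_image, "a photo of a cat", return_tensors="pt")
# `inputs` now carries input_ids/attention_mask plus pixel_values and pixel_mask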
704
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup  # fixed: the module is bs4, not bsa
from fake_useragent import UserAgent

if __name__ == "__main__":
    # variable names restored from their later uses; the original assigned every
    # result to the same placeholder, which raised NameError at runtime
    query = '%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input('Search: ')))
    print('Googling.....')
    url = f'https://www.google.com/search?q={query}&num=100'
    res = requests.get(
        url,
        headers={'User-Agent': str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, 'html.parser')
            .find('div', attrs={'class': 'yuRUbf'})
            .find('a')
            .get('href')
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, 'html.parser')
            .find('div', attrs={'class': 'kCrYT'})
            .find('a')
            .get('href')
        )['url'][0]
    webbrowser.open(link)
52
0
import re


def A(dna) -> str:
    # parameter renamed to `dna` to match the return statement, which referenced it
    if len(re.findall('[ATCG]', dna)) != len(dna):
        raise ValueError('Invalid Strand')
    return dna.translate(dna.maketrans('ATCG', 'TAGC'))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
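# Added illustrative usage (sketch): each base is mapped to its Watson-Crick
# complement; anything outside A/T/C/G raises ValueError.
assert A("ATCG") == "TAGC"
assert A("GTAT") == "CATA"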
705
import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" A__ : Any = IFInpaintingPipeline A__ : Dict = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"} A__ : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS A__ : Dict = PipelineTesterMixin.required_optional_params - {"latents"} def _a ( self : Any ): """simple docstring""" return self._get_dummy_components() def _a ( self : Optional[int] , _snake_case : Any , _snake_case : str=0 ): """simple docstring""" if str(_snake_case ).startswith('mps' ): A__ = torch.manual_seed(_snake_case ) else: A__ = torch.Generator(device=_snake_case ).manual_seed(_snake_case ) A__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case ) A__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case ) A__ = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def _a ( self : Dict ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def _a ( self : int ): """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def _a ( self : Optional[int] ): """simple docstring""" super().test_save_load_floataa(expected_max_diff=1E-1 ) def _a ( self : List[str] ): """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def _a ( self : Dict ): """simple docstring""" self._test_save_load_local() def _a ( self : Optional[int] ): """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
52
0
'''simple docstring''' import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __lowerCAmelCase : """simple docstring""" def __init__( self : Dict , _snake_case : Tuple , _snake_case : Optional[Any]=13 , _snake_case : str=32 , _snake_case : Tuple=3 , _snake_case : int=4 , _snake_case : Any=[10, 20, 30, 40] , _snake_case : Optional[int]=[2, 2, 3, 2] , _snake_case : Optional[int]=True , _snake_case : List[str]=True , _snake_case : Any=37 , _snake_case : Tuple="gelu" , _snake_case : Optional[int]=10 , _snake_case : Optional[int]=0.02 , _snake_case : List[str]=["stage2", "stage3", "stage4"] , _snake_case : Union[str, Any]=[2, 3, 4] , _snake_case : List[str]=None , ): """simple docstring""" A__ = parent A__ = batch_size A__ = image_size A__ = num_channels A__ = num_stages A__ = hidden_sizes A__ = depths A__ = is_training A__ = use_labels A__ = intermediate_size A__ = hidden_act A__ = num_labels A__ = initializer_range A__ = out_features A__ = out_indices A__ = scope def _a ( self : Tuple ): """simple docstring""" A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.num_labels ) A__ = self.get_config() return config, pixel_values, labels def _a ( self : Union[str, Any] ): """simple docstring""" return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_snake_case , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def _a ( self : List[str] , _snake_case : List[str] , _snake_case : Optional[Any] , _snake_case : Optional[int] ): """simple docstring""" A__ = ConvNextVaModel(config=_snake_case ) model.to(_snake_case ) model.eval() A__ = model(_snake_case ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _a ( self : int , _snake_case : Optional[int] , _snake_case : Tuple , _snake_case : Any ): """simple docstring""" A__ = ConvNextVaForImageClassification(_snake_case ) model.to(_snake_case ) model.eval() A__ = model(_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _a ( self : Optional[int] , _snake_case : Optional[Any] , _snake_case : List[Any] , _snake_case : Any ): """simple docstring""" A__ = ConvNextVaBackbone(config=_snake_case ) model.to(_snake_case ) model.eval() A__ = model(_snake_case ) # 
verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None A__ = None A__ = ConvNextVaBackbone(config=_snake_case ) model.to(_snake_case ) model.eval() A__ = model(_snake_case ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def _a ( self : Tuple ): """simple docstring""" A__ = self.prepare_config_and_inputs() A__ , A__ , A__ = config_and_inputs A__ = {'pixel_values': pixel_values} return config, inputs_dict def _a ( self : Tuple ): """simple docstring""" A__ = self.prepare_config_and_inputs() A__ , A__ , A__ = config_and_inputs A__ = {'pixel_values': pixel_values, 'labels': labels} return config, inputs_dict @require_torch class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" A__ : Dict = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) A__ : List[str] = ( {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification} if is_torch_available() else {} ) A__ : Optional[int] = False A__ : Union[str, Any] = False A__ : Tuple = False A__ : Dict = False A__ : str = False def _a ( self : List[Any] ): """simple docstring""" A__ = ConvNextVaModelTester(self ) A__ = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=37 ) def _a ( self : List[str] ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _a ( self : str ): """simple docstring""" return @unittest.skip(reason='ConvNextV2 does not use inputs_embeds' ) def _a ( self : Tuple ): """simple docstring""" pass @unittest.skip(reason='ConvNextV2 does not support input and output embeddings' ) def _a ( self : List[str] ): """simple docstring""" pass @unittest.skip(reason='ConvNextV2 does not use feedforward chunking' ) def _a ( self : Union[str, Any] ): """simple docstring""" pass def _a ( self : str ): """simple docstring""" if not self.model_tester.is_training: return for model_class in self.all_model_classes: A__ , A__ = self.model_tester.prepare_config_and_inputs_with_labels() A__ = True if model_class.__name__ in [ *get_values(_snake_case ), *get_values(_snake_case ), ]: continue A__ = model_class(_snake_case ) model.to(_snake_case ) model.train() A__ = self._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case ) A__ = model(**_snake_case ).loss loss.backward() def _a ( self : Optional[int] ): """simple docstring""" if not self.model_tester.is_training: return for model_class in self.all_model_classes: A__ , A__ = 
self.model_tester.prepare_config_and_inputs_with_labels() A__ = False A__ = True if ( model_class.__name__ in [*get_values(_snake_case ), *get_values(_snake_case )] or not model_class.supports_gradient_checkpointing ): continue A__ = model_class(_snake_case ) model.to(_snake_case ) model.gradient_checkpointing_enable() model.train() A__ = self._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case ) A__ = model(**_snake_case ).loss loss.backward() def _a ( self : Any ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(_snake_case ) A__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ['pixel_values'] self.assertListEqual(arg_names[:1] , _snake_case ) def _a ( self : List[Any] ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def _a ( self : str ): """simple docstring""" def check_hidden_states_output(_snake_case : Tuple , _snake_case : List[Any] , _snake_case : Optional[Any] ): A__ = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): A__ = model(**self._prepare_for_class(_snake_case , _snake_case ) ) A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A__ = self.model_tester.num_stages self.assertEqual(len(_snake_case ) , expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = True check_hidden_states_output(_snake_case , _snake_case , _snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A__ = True check_hidden_states_output(_snake_case , _snake_case , _snake_case ) def _a ( self : Union[str, Any] ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_snake_case ) @slow def _a ( self : int ): """simple docstring""" for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = ConvNextVaModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) def A ( ) -> Any: A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def _a ( self : Tuple ): """simple docstring""" return AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224' ) if is_vision_available() else None @slow def _a ( self : Optional[int] ): """simple docstring""" A__ = ConvNextVaForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224' ).to(_snake_case ) A__ = self.default_image_processor A__ = prepare_img() A__ = preprocessor(images=_snake_case , return_tensors='pt' ).to(_snake_case ) # forward pass with torch.no_grad(): A__ = model(**_snake_case ) # verify the logits A__ = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , _snake_case ) A__ = torch.tensor([0.9996, 0.1966, -0.4386] ).to(_snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _snake_case , atol=1E-4 ) )
706
import inspect import jax import jax.lax as lax import jax.numpy as jnp from ..utils import add_start_docstrings from ..utils.logging import get_logger SCREAMING_SNAKE_CASE__ = get_logger(__name__) SCREAMING_SNAKE_CASE__ = r''' Args: input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs (`Dict[str, Any]`, *optional*): Additional logits processor specific kwargs. Return: `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores. ''' class __lowerCAmelCase : """simple docstring""" @add_start_docstrings(_snake_case ) def __call__( self : Optional[int] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray ): """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class __lowerCAmelCase : """simple docstring""" @add_start_docstrings(_snake_case ) def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray ): """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" @add_start_docstrings(_snake_case ) def __call__( self : Any , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int , **_snake_case : Optional[int] ): """simple docstring""" for processor in self: A__ = inspect.signature(processor.__call__ ).parameters if len(_snake_case ) > 3: if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ): raise ValueError( F'''Make sure that all the required parameters: {list(function_args.keys() )} for ''' F'''{processor.__class__} are passed to the logits processor.''' ) A__ = processor(_snake_case , _snake_case , _snake_case , **_snake_case ) else: A__ = processor(_snake_case , _snake_case , _snake_case ) return scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Any , _snake_case : float ): """simple docstring""" if not isinstance(_snake_case , _snake_case ) or not (temperature > 0): raise ValueError(F'''`temperature` has to be a strictly positive float, but is {temperature}''' ) A__ = temperature def __call__( self : str , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ): """simple docstring""" A__ = scores / self.temperature return scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Optional[Any] , _snake_case : float , _snake_case : float = -float('Inf' ) , _snake_case : int = 1 ): """simple docstring""" if not isinstance(_snake_case , _snake_case ) or (top_p < 0 or top_p > 1.0): raise ValueError(F'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' ) if not isinstance(_snake_case , _snake_case ) or (min_tokens_to_keep < 1): raise ValueError(F'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' ) A__ = top_p A__ = filter_value A__ = min_tokens_to_keep def __call__( self : str , _snake_case : jnp.ndarray , _snake_case 
: jnp.ndarray , _snake_case : int ): """simple docstring""" A__ , A__ = lax.top_k(_snake_case , scores.shape[-1] ) A__ = jnp.full_like(_snake_case , self.filter_value ) A__ = jax.nn.softmax(_snake_case , axis=-1 ).cumsum(axis=-1 ) A__ = cumulative_probs < self.top_p # include the token that is higher than top_p as well A__ = jnp.roll(_snake_case , 1 ) score_mask |= score_mask.at[:, 0].set(_snake_case ) # min tokens to keep A__ = score_mask.at[:, : self.min_tokens_to_keep].set(_snake_case ) A__ = jnp.where(_snake_case , _snake_case , _snake_case ) A__ = jax.lax.sort_key_val(_snake_case , _snake_case )[-1] return next_scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Union[str, Any] , _snake_case : int , _snake_case : float = -float('Inf' ) , _snake_case : int = 1 ): """simple docstring""" if not isinstance(_snake_case , _snake_case ) or top_k <= 0: raise ValueError(F'''`top_k` has to be a strictly positive integer, but is {top_k}''' ) A__ = max(_snake_case , _snake_case ) A__ = filter_value def __call__( self : Optional[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ): """simple docstring""" A__ , A__ = scores.shape A__ = jnp.full(batch_size * vocab_size , self.filter_value ) A__ = min(self.top_k , scores.shape[-1] ) # Safety check A__ , A__ = lax.top_k(_snake_case , _snake_case ) A__ = jnp.broadcast_to((jnp.arange(_snake_case ) * vocab_size)[:, None] , (batch_size, topk) ).flatten() A__ = topk_scores.flatten() A__ = topk_indices.flatten() + shift A__ = next_scores_flat.at[topk_indices_flat].set(_snake_case ) A__ = next_scores_flat.reshape(_snake_case , _snake_case ) return next_scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Any , _snake_case : int ): """simple docstring""" A__ = bos_token_id def __call__( self : Optional[int] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ): """simple docstring""" A__ = jnp.full(scores.shape , -float('inf' ) ) A__ = 1 - jnp.bool_(cur_len - 1 ) A__ = jnp.where(_snake_case , new_scores.at[:, self.bos_token_id].set(0 ) , _snake_case ) return scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Any , _snake_case : int , _snake_case : int ): """simple docstring""" A__ = max_length A__ = eos_token_id def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ): """simple docstring""" A__ = jnp.full(scores.shape , -float('inf' ) ) A__ = 1 - jnp.bool_(cur_len - self.max_length + 1 ) A__ = jnp.where(_snake_case , new_scores.at[:, self.eos_token_id].set(0 ) , _snake_case ) return scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Dict , _snake_case : int , _snake_case : int ): """simple docstring""" if not isinstance(_snake_case , _snake_case ) or min_length < 0: raise ValueError(F'''`min_length` has to be a positive integer, but is {min_length}''' ) if not isinstance(_snake_case , _snake_case ) or eos_token_id < 0: raise ValueError(F'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' ) A__ = min_length A__ = eos_token_id def __call__( self : int , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ): """simple docstring""" A__ = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 ) A__ = jnp.where(_snake_case , scores.at[:, self.eos_token_id].set(-float('inf' ) ) , _snake_case ) return scores class __lowerCAmelCase ( UpperCAmelCase_ ): 
"""simple docstring""" def __init__( self : int , _snake_case : Tuple , _snake_case : Union[str, Any] ): """simple docstring""" A__ = list(_snake_case ) A__ = begin_index def __call__( self : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : str , _snake_case : int ): """simple docstring""" A__ = 1 - jnp.bool_(cur_len - self.begin_index ) A__ = jnp.where(_snake_case , scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) , _snake_case ) return scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : int , _snake_case : list ): """simple docstring""" A__ = list(_snake_case ) def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ): """simple docstring""" A__ = scores.at[..., self.suppress_tokens].set(-float('inf' ) ) return scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : List[str] , _snake_case : Optional[Any] ): """simple docstring""" A__ = dict(_snake_case ) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have a negative value. A__ = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1 for index, token in force_token_map.items(): if token is not None: A__ = force_token_array.at[index].set(_snake_case ) A__ = jnp.intaa(_snake_case ) def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ): """simple docstring""" def _force_token(_snake_case : Dict ): A__ = scores.shape[0] A__ = self.force_token_array[generation_idx] A__ = jnp.ones_like(_snake_case , dtype=scores.dtype ) * -float('inf' ) A__ = jnp.zeros((batch_size, 1) , dtype=scores.dtype ) A__ = lax.dynamic_update_slice(_snake_case , _snake_case , (0, current_token) ) return new_scores A__ = lax.cond( cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond( self.force_token_array[cur_len] >= 0 , lambda: _force_token(_snake_case ) , lambda: scores , ) , ) return scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : Dict , _snake_case : List[Any] ): """simple docstring""" A__ = generate_config.eos_token_id A__ = generate_config.no_timestamps_token_id A__ = generate_config.no_timestamps_token_id + 1 A__ = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(_snake_case , 'max_initial_timestamp_index' ): A__ = generate_config.max_initial_timestamp_index else: A__ = model_config.vocab_size if self.max_initial_timestamp_index is None: A__ = model_config.vocab_size def __call__( self : Tuple , _snake_case : List[Any] , _snake_case : Dict , _snake_case : Dict ): """simple docstring""" A__ = scores.at[:, self.no_timestamps_token_id].set(-float('inf' ) ) def handle_pairs(_snake_case : Dict , _snake_case : str ): A__ = jnp.where((cur_len - self.begin_index) >= 1 , _snake_case , _snake_case ) A__ = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , _snake_case , ) A__ = jnp.where((cur_len - self.begin_index) < 2 , _snake_case , _snake_case ) A__ = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin , _snake_case , _snake_case , ) return jnp.where( _snake_case , jnp.where( 
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('inf' ) ) , scores_k.at[: self.eos_token_id].set(-float('inf' ) ) , ) , _snake_case , ) A__ = jax.vmap(_snake_case )(_snake_case , _snake_case ) A__ = jnp.where(cur_len == self.begin_index , _snake_case , _snake_case ) A__ = jnp.where( self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , _snake_case , ) A__ = self.timestamp_begin + self.max_initial_timestamp_index A__ = jnp.where( _snake_case , scores.at[:, last_allowed + 1 :].set(-float('inf' ) ) , _snake_case , ) # if sum of probability over timestamps is above any other token, sample timestamp A__ = jax.nn.log_softmax(_snake_case , axis=-1 ) def handle_cumulative_probs(_snake_case : List[Any] , _snake_case : Union[str, Any] ): A__ = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 ) A__ = jnp.max(logprobs_k[: self.timestamp_begin] ) return jnp.where( timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('inf' ) ) , _snake_case , ) A__ = jax.vmap(_snake_case )(_snake_case , _snake_case ) return scores
52
0
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''} SCREAMING_SNAKE_CASE__ = { '''vocab_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''', }, '''tokenizer_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''', }, } SCREAMING_SNAKE_CASE__ = { '''google/rembert''': 2_5_6, } SCREAMING_SNAKE_CASE__ = '''▁''' class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" A__ : Any = VOCAB_FILES_NAMES A__ : str = PRETRAINED_VOCAB_FILES_MAP A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ : int = RemBertTokenizer def __init__( self : Union[str, Any] , _snake_case : Any=None , _snake_case : Optional[Any]=None , _snake_case : Any=True , _snake_case : Optional[int]=True , _snake_case : Dict=False , _snake_case : Dict="[CLS]" , _snake_case : List[Any]="[SEP]" , _snake_case : Union[str, Any]="<unk>" , _snake_case : List[str]="[SEP]" , _snake_case : List[str]="<pad>" , _snake_case : str="[CLS]" , _snake_case : Any="[MASK]" , **_snake_case : Any , ): """simple docstring""" A__ = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else mask_token super().__init__( _snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , remove_space=_snake_case , keep_accents=_snake_case , bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , **_snake_case , ) A__ = do_lower_case A__ = remove_space A__ = keep_accents A__ = vocab_file A__ = False if not self.vocab_file else True def _a ( self : Any , _snake_case : List[int] , _snake_case : Optional[List[int]] = None ): """simple docstring""" A__ = [self.sep_token_id] A__ = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _a ( self : Tuple , _snake_case : List[int] , _snake_case : Optional[List[int]] = None , _snake_case : bool = False ): """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.' 
) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(_snake_case )) + [1] + ([0] * len(_snake_case )) + [1] return [1] + ([0] * len(_snake_case )) + [1] def _a ( self : Dict , _snake_case : List[int] , _snake_case : Optional[List[int]] = None ): """simple docstring""" A__ = [self.sep_token_id] A__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _a ( self : Any , _snake_case : str , _snake_case : Optional[str] = None ): """simple docstring""" if not os.path.isdir(_snake_case ): logger.error('Vocabulary path ({}) should be a directory'.format(_snake_case ) ) return A__ = os.path.join( _snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ): copyfile(self.vocab_file , _snake_case ) return (out_vocab_file,)
707
import argparse import struct import unittest class __lowerCAmelCase : """simple docstring""" def __init__( self : List[str] , _snake_case : bytes ): """simple docstring""" A__ = data # Initialize hash values A__ = [ 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19, ] # Initialize round constants A__ = [ 0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5, 0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3, 0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174, 0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC, 0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA, 0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7, 0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967, 0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13, 0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85, 0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3, 0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070, 0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5, 0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3, 0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208, 0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2, ] A__ = self.preprocessing(self.data ) self.final_hash() @staticmethod def _a ( _snake_case : bytes ): """simple docstring""" A__ = B'\x80' + (B'\x00' * (63 - (len(_snake_case ) + 8) % 64)) A__ = struct.pack('>Q' , (len(_snake_case ) * 8) ) return data + padding + big_endian_integer def _a ( self : Optional[int] ): """simple docstring""" A__ = [ self.preprocessed_data[x : x + 64] for x in range(0 , len(self.preprocessed_data ) , 64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers A__ = list(struct.unpack('>16L' , _snake_case ) ) # add 48 0-ed integers words += [0] * 48 A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ = self.hashes for index in range(0 , 64 ): if index > 15: # modify the zero-ed indexes at the end of the array A__ = ( self.ror(words[index - 15] , 7 ) ^ self.ror(words[index - 15] , 18 ) ^ (words[index - 15] >> 3) ) A__ = ( self.ror(words[index - 2] , 17 ) ^ self.ror(words[index - 2] , 19 ) ^ (words[index - 2] >> 10) ) A__ = ( words[index - 16] + sa + words[index - 7] + sa ) % 0x100000000 # Compression A__ = self.ror(_snake_case , 6 ) ^ self.ror(_snake_case , 11 ) ^ self.ror(_snake_case , 25 ) A__ = (e & f) ^ ((~e & 0xFFFFFFFF) & g) A__ = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0x100000000 A__ = self.ror(_snake_case , 2 ) ^ self.ror(_snake_case , 13 ) ^ self.ror(_snake_case , 22 ) A__ = (a & b) ^ (a & c) ^ (b & c) A__ = (sa + maj) % 0x100000000 A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ = ( g, f, e, ((d + tempa) % 0x100000000), c, b, a, ((tempa + tempa) % 0x100000000), ) A__ = [a, b, c, d, e, f, g, h] # Modify final values A__ = [ ((element + mutated_hash_values[index]) % 0x100000000) for index, element in enumerate(self.hashes ) ] A__ = ''.join([hex(_snake_case )[2:].zfill(8 ) for value in self.hashes] ) def _a ( self : Dict , _snake_case : int , _snake_case : int ): """simple docstring""" return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations) class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def _a ( self : str ): """simple docstring""" import hashlib A__ = bytes('Test String' , 'utf-8' ) self.assertEqual(SHAaaa(_snake_case ).hash , hashlib.shaaaa(_snake_case ).hexdigest() ) def A ( ) -> None: import doctest doctest.testmod() A__ = argparse.ArgumentParser() parser.add_argument( '-s' , '--string' , dest='input_string' , default='Hello World!! 
Welcome to Cryptography' , help='Hash the string' , ) parser.add_argument( '-f' , '--file' , dest='input_file' , help='Hash contents of a file' ) A__ = parser.parse_args() A__ = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , 'rb' ) as f: A__ = f.read() else: A__ = bytes(__UpperCamelCase , 'utf-8' ) print(SHAaaa(__UpperCamelCase ).hash ) if __name__ == "__main__": main()
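# Added illustrative usage (sketch): hash a byte string through the class above.
# Note that the compression loop collapses the two round temporaries into a single
# `tempa`, so the digest may not match hashlib's reference output; treat this as an
# interface example rather than a verified test vector.
digest = SHAaaa(b"hello world").hash
print(digest)  # 64 hex characters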
52
0
import random


def random_graph(vertices_number, probability, directed=False) -> dict:
    # names reconstructed from the body: the second function below is the
    # `complete_graph` this one calls, and both originally shared the name `A`
    graph = {i: [] for i in range(vertices_number)}

    # if probability is greater than or equal to 1, generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is less than or equal to 0, return a graph without edges
    if probability <= 0:
        return graph

    # for each pair of nodes, add an edge from i to j
    # if the randomly generated number is less than the probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, also add an edge from j to i
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number) -> dict:
    return {i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)}


if __name__ == "__main__":
    import doctest

    doctest.testmod()
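# Added illustrative usage (sketch; seed and sizes are arbitrary):
random.seed(1)
print(random_graph(4, 0.5))                  # undirected, roughly half of all edges
print(random_graph(4, 0.5, directed=True))   # edges only from lower to higher index
print(complete_graph(3))                     # {0: [1, 2], 1: [0, 2], 2: [0, 1]}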
708
import math
import random


def sigmoid_function(value, deriv=False) -> float:
    # names restored from the body; the duplicated placeholder parameters were a
    # SyntaxError, and the call sites below fix the intended names
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected, number_propagations) -> float:
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_a = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_a
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_a, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_a * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input('Expected value: '))
    number_propagations = int(input('Number of propagations: '))
    print(forward_propagation(expected, number_propagations))
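# Added illustrative usage (sketch): with many propagations the single weight is
# trained until the scaled sigmoid output approaches the expected value.
random.seed(0)
result = forward_propagation(32, 450_000)
print(result)  # expected to land close to 32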
52
0
import os import unittest from transformers import BatchEncoding from transformers.models.bert.tokenization_bert import ( BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer from transformers.testing_utils import require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin class __lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase): """simple docstring""" A__ : str = ProphetNetTokenizer A__ : List[str] = False def _a ( self : Dict ): """simple docstring""" super().setUp() A__ = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def _a ( self : Any , _snake_case : Union[str, Any] ): """simple docstring""" A__ = 'UNwant\u00E9d,running' A__ = 'unwanted, running' return input_text, output_text def _a ( self : List[Any] ): """simple docstring""" A__ = self.tokenizer_class(self.vocab_file ) A__ = tokenizer.tokenize('UNwant\u00E9d,running' ) self.assertListEqual(_snake_case , ['un', '##want', '##ed', ',', 'runn', '##ing'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , [9, 6, 7, 12, 10, 11] ) def _a ( self : List[str] ): """simple docstring""" A__ = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] ) def _a ( self : Dict ): """simple docstring""" A__ = BasicTokenizer(do_lower_case=_snake_case ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def _a ( self : List[Any] ): """simple docstring""" A__ = BasicTokenizer(do_lower_case=_snake_case , strip_accents=_snake_case ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] ) def _a ( self : Any ): """simple docstring""" A__ = BasicTokenizer(do_lower_case=_snake_case , strip_accents=_snake_case ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def _a ( self : Tuple ): """simple docstring""" A__ = BasicTokenizer(do_lower_case=_snake_case ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def _a ( self : Any ): """simple docstring""" A__ = BasicTokenizer(do_lower_case=_snake_case ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] ) def _a ( self : str ): """simple docstring""" A__ = BasicTokenizer(do_lower_case=_snake_case , strip_accents=_snake_case ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] ) def _a ( self : Tuple ): """simple docstring""" A__ = BasicTokenizer(do_lower_case=_snake_case , strip_accents=_snake_case ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? 
' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] ) def _a ( self : Any ): """simple docstring""" A__ = BasicTokenizer(do_lower_case=_snake_case , never_split=['[UNK]'] ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] ) def _a ( self : Optional[int] ): """simple docstring""" A__ = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing'] A__ = {} for i, token in enumerate(_snake_case ): A__ = i A__ = WordpieceTokenizer(vocab=_snake_case , unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) , [] ) self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] ) self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] ) @require_torch def _a ( self : List[str] ): """simple docstring""" A__ = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' ) A__ = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] A__ = [10_37, 21_46, 2_04_23, 20_05, 76_80, 78_49, 39_89, 10_12, 1_02] A__ = tokenizer(_snake_case , padding=_snake_case , return_tensors='pt' ) self.assertIsInstance(_snake_case , _snake_case ) A__ = list(batch.input_ids.numpy()[0] ) self.assertListEqual(_snake_case , _snake_case ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) def _a ( self : Union[str, Any] ): """simple docstring""" self.assertTrue(_is_whitespace(' ' ) ) self.assertTrue(_is_whitespace('\t' ) ) self.assertTrue(_is_whitespace('\r' ) ) self.assertTrue(_is_whitespace('\n' ) ) self.assertTrue(_is_whitespace('\u00A0' ) ) self.assertFalse(_is_whitespace('A' ) ) self.assertFalse(_is_whitespace('-' ) ) def _a ( self : str ): """simple docstring""" self.assertTrue(_is_control('\u0005' ) ) self.assertFalse(_is_control('A' ) ) self.assertFalse(_is_control(' ' ) ) self.assertFalse(_is_control('\t' ) ) self.assertFalse(_is_control('\r' ) ) def _a ( self : List[str] ): """simple docstring""" self.assertTrue(_is_punctuation('-' ) ) self.assertTrue(_is_punctuation('$' ) ) self.assertTrue(_is_punctuation('`' ) ) self.assertTrue(_is_punctuation('.' ) ) self.assertFalse(_is_punctuation('A' ) ) self.assertFalse(_is_punctuation(' ' ) ) @slow def _a ( self : Any ): """simple docstring""" A__ = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' ) A__ = tokenizer.encode('sequence builders' , add_special_tokens=_snake_case ) A__ = tokenizer.encode('multi-sequence build' , add_special_tokens=_snake_case ) A__ = tokenizer.build_inputs_with_special_tokens(_snake_case ) A__ = tokenizer.build_inputs_with_special_tokens(_snake_case , _snake_case ) assert encoded_sentence == text + [1_02] assert encoded_pair == text + [1_02] + text_a + [1_02]
709
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
    from transformers.models.ta.modeling_flax_ta import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
    """simple docstring"""

    @slow
    def _a ( self : int ):
        """simple docstring"""
        A__ = FlaxMTaForConditionalGeneration.from_pretrained('google/mt5-small' )
        A__ = AutoTokenizer.from_pretrained('google/mt5-small' )
        A__ = tokenizer('Hello there' , return_tensors='np' ).input_ids
        A__ = tokenizer('Hi I am' , return_tensors='np' ).input_ids
        A__ = shift_tokens_right(_snake_case , model.config.pad_token_id , model.config.decoder_start_token_id )
        A__ = model(_snake_case , decoder_input_ids=_snake_case ).logits
        A__ = optax.softmax_cross_entropy(_snake_case , onehot(_snake_case , logits.shape[-1] ) ).mean()
        A__ = -(labels.shape[-1] * loss.item())
        A__ = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
52
0
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def _a ( self : Any ): """simple docstring""" A__ = tempfile.mkdtemp() # fmt: off A__ = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>'] # fmt: on A__ = dict(zip(_snake_case , range(len(_snake_case ) ) ) ) A__ = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', ''] A__ = {'unk_token': '<unk>'} A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(_snake_case ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(_snake_case ) ) A__ = { 'do_resize': True, 'size': 20, 'do_center_crop': True, 'crop_size': 18, 'do_normalize': True, 'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073], 'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711], } A__ = os.path.join(self.tmpdirname , _snake_case ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(_snake_case , _snake_case ) def _a ( self : Union[str, Any] , **_snake_case : Tuple ): """simple docstring""" return CLIPTokenizer.from_pretrained(self.tmpdirname , **_snake_case ) def _a ( self : Dict , **_snake_case : Union[str, Any] ): """simple docstring""" return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_snake_case ) def _a ( self : Optional[int] , **_snake_case : Union[str, Any] ): """simple docstring""" return CLIPImageProcessor.from_pretrained(self.tmpdirname , **_snake_case ) def _a ( self : Dict ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def _a ( self : Union[str, Any] ): """simple docstring""" A__ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] A__ = [Image.fromarray(np.moveaxis(_snake_case , 0 , -1 ) ) for x in image_inputs] return image_inputs def _a ( self : Optional[int] ): """simple docstring""" A__ = self.get_tokenizer() A__ = self.get_rust_tokenizer() A__ = self.get_image_processor() A__ = CLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case ) processor_slow.save_pretrained(self.tmpdirname ) A__ = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_snake_case ) A__ = CLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case ) processor_fast.save_pretrained(self.tmpdirname ) A__ = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , _snake_case ) self.assertIsInstance(processor_fast.tokenizer , _snake_case ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , 
image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , _snake_case ) self.assertIsInstance(processor_fast.image_processor , _snake_case ) def _a ( self : List[str] ): """simple docstring""" A__ = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A__ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) A__ = self.get_image_processor(do_normalize=_snake_case , padding_value=1.0 ) A__ = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_snake_case , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , _snake_case ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _snake_case ) def _a ( self : int ): """simple docstring""" A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case ) A__ = self.prepare_image_inputs() A__ = image_processor(_snake_case , return_tensors='np' ) A__ = processor(images=_snake_case , return_tensors='np' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _a ( self : Any ): """simple docstring""" A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case ) A__ = 'lower newer' A__ = processor(text=_snake_case ) A__ = tokenizer(_snake_case ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _a ( self : Union[str, Any] ): """simple docstring""" A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case ) A__ = 'lower newer' A__ = self.prepare_image_inputs() A__ = processor(text=_snake_case , images=_snake_case ) self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(_snake_case ): processor() def _a ( self : str ): """simple docstring""" A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case ) A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A__ = processor.batch_decode(_snake_case ) A__ = tokenizer.batch_decode(_snake_case ) self.assertListEqual(_snake_case , _snake_case ) def _a ( self : Tuple ): """simple docstring""" A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case ) A__ = 'lower newer' A__ = self.prepare_image_inputs() A__ = processor(text=_snake_case , images=_snake_case ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
710
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''', '''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''', } class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" A__ : List[str] = "roberta" def __init__( self : List[str] , _snake_case : Union[str, Any]=5_02_65 , _snake_case : List[Any]=7_68 , _snake_case : List[str]=12 , _snake_case : List[str]=12 , _snake_case : Any=30_72 , _snake_case : Union[str, Any]="gelu" , _snake_case : int=0.1 , _snake_case : Union[str, Any]=0.1 , _snake_case : Tuple=5_12 , _snake_case : Union[str, Any]=2 , _snake_case : Any=0.02 , _snake_case : Any=1E-12 , _snake_case : List[Any]=1 , _snake_case : int=0 , _snake_case : Any=2 , _snake_case : Optional[Any]="absolute" , _snake_case : int=True , _snake_case : Any=None , **_snake_case : Any , ): """simple docstring""" super().__init__(pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case ) A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = hidden_act A__ = intermediate_size A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_vocab_size A__ = initializer_range A__ = layer_norm_eps A__ = position_embedding_type A__ = use_cache A__ = classifier_dropout class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" @property def _a ( self : Dict ): """simple docstring""" if self.task == "multiple-choice": A__ = {0: 'batch', 1: 'choice', 2: 'sequence'} else: A__ = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
52
0
import warnings

from ...utils import logging
from .image_processing_donut import DonutImageProcessor


SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)


class __lowerCAmelCase ( UpperCAmelCase_ ):
    """simple docstring"""

    def __init__( self : str , *_snake_case : Union[str, Any] , **_snake_case : str ):
        """simple docstring"""
        warnings.warn(
            'The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DonutImageProcessor instead.' , _snake_case , )
        super().__init__(*_snake_case , **_snake_case )
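A hedged usage sketch of the shim above (assuming it is exported from a standard transformers install as DonutFeatureExtractor; the no-argument constructor is illustrative): instantiating it should emit the deprecation warning and otherwise behave like DonutImageProcessor.

import warnings

from transformers import DonutFeatureExtractor  # deprecated alias shim, as defined above

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    extractor = DonutFeatureExtractor()  # forwards straight to DonutImageProcessor.__init__
print(any('DonutFeatureExtractor is deprecated' in str(w.message) for w in caught))  # True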
711
import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" A__ : int = LongformerTokenizer A__ : Optional[int] = True A__ : Any = LongformerTokenizerFast A__ : Dict = True def _a ( self : int ): """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A__ = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] A__ = dict(zip(_snake_case , range(len(_snake_case ) ) ) ) A__ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] A__ = {'unk_token': '<unk>'} A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(_snake_case ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(_snake_case ) ) def _a ( self : int , **_snake_case : Union[str, Any] ): """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **_snake_case ) def _a ( self : Optional[int] , **_snake_case : List[Any] ): """simple docstring""" kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_snake_case ) def _a ( self : Any , _snake_case : Optional[Any] ): """simple docstring""" A__ = 'lower newer' A__ = 'lower newer' return input_text, output_text def _a ( self : Any ): """simple docstring""" A__ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) A__ = 'lower newer' A__ = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er'] A__ = tokenizer.tokenize(_snake_case ) # , add_prefix_space=True) self.assertListEqual(_snake_case , _snake_case ) A__ = tokens + [tokenizer.unk_token] A__ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , _snake_case ) def _a ( self : List[str] ): """simple docstring""" A__ = self.get_tokenizer() self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=_snake_case ) , [0, 3_14_14, 2_32, 3_28, 2] ) self.assertListEqual( tokenizer.encode('Hello world! 
cécé herlolip 418' , add_special_tokens=_snake_case ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , ) @slow def _a ( self : List[Any] ): """simple docstring""" A__ = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' ) A__ = tokenizer.encode('sequence builders' , add_special_tokens=_snake_case ) A__ = tokenizer.encode('multi-sequence build' , add_special_tokens=_snake_case ) A__ = tokenizer.encode( 'sequence builders' , add_special_tokens=_snake_case , add_prefix_space=_snake_case ) A__ = tokenizer.encode( 'sequence builders' , 'multi-sequence build' , add_special_tokens=_snake_case , add_prefix_space=_snake_case ) A__ = tokenizer.build_inputs_with_special_tokens(_snake_case ) A__ = tokenizer.build_inputs_with_special_tokens(_snake_case , _snake_case ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def _a ( self : List[str] ): """simple docstring""" A__ = self.get_tokenizer() A__ = 'Encode this sequence.' A__ = tokenizer.byte_encoder[' '.encode('utf-8' )[0]] # Testing encoder arguments A__ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case , add_prefix_space=_snake_case ) A__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(_snake_case , _snake_case ) A__ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case , add_prefix_space=_snake_case ) A__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(_snake_case , _snake_case ) tokenizer.add_special_tokens({'bos_token': '<s>'} ) A__ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case ) A__ = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(_snake_case , _snake_case ) # Testing spaces after special tokens A__ = '<mask>' tokenizer.add_special_tokens( {'mask_token': AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case )} ) # mask token has a left space A__ = tokenizer.convert_tokens_to_ids(_snake_case ) A__ = 'Encode <mask> sequence' A__ = 'Encode <mask>sequence' A__ = tokenizer.encode(_snake_case ) A__ = encoded.index(_snake_case ) A__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(_snake_case , _snake_case ) A__ = tokenizer.encode(_snake_case ) A__ = encoded.index(_snake_case ) A__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(_snake_case , _snake_case ) def _a ( self : Dict ): """simple docstring""" pass def _a ( self : Union[str, Any] ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): A__ = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case ) A__ = self.tokenizer_class.from_pretrained(_snake_case , **_snake_case ) A__ = 'A, <mask> AllenNLP sentence.' 
A__ = tokenizer_r.encode_plus(_snake_case , add_special_tokens=_snake_case , return_token_type_ids=_snake_case ) A__ = tokenizer_p.encode_plus(_snake_case , add_special_tokens=_snake_case , return_token_type_ids=_snake_case ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , ) A__ = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] ) A__ = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( _snake_case , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) self.assertSequenceEqual( _snake_case , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) def _a ( self : List[Any] ): """simple docstring""" for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): A__ = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) A__ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['add_prefix_space'] , _snake_case ) self.assertEqual(post_processor_state['add_prefix_space'] , _snake_case ) self.assertEqual(post_processor_state['trim_offsets'] , _snake_case ) def _a ( self : Optional[Any] ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): A__ = 'hello' # `hello` is a token in the vocabulary of `pretrained_name` A__ = F'''{text_of_1_token} {text_of_1_token}''' A__ = self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_snake_case ) + 1, len(_snake_case ) + 1 + len(_snake_case )) , ) A__ = self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_snake_case ) + 1, len(_snake_case ) + 1 + len(_snake_case )) , ) A__ = self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_snake_case ), len(_snake_case ) + 1 + len(_snake_case )) , ) A__ = 
self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_snake_case ), len(_snake_case ) + 1 + len(_snake_case )) , ) A__ = F''' {text}''' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) A__ = self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_snake_case ) + 1, 1 + len(_snake_case ) + 1 + len(_snake_case )) , ) A__ = self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_snake_case ), 1 + len(_snake_case ) + 1 + len(_snake_case )) , ) A__ = self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_snake_case ), 1 + len(_snake_case ) + 1 + len(_snake_case )) , )
52
0
'''simple docstring''' from __future__ import annotations from collections import deque from collections.abc import Sequence from dataclasses import dataclass from typing import Any @dataclass class __lowerCAmelCase : """simple docstring""" A__ : int A__ : Node | None = None A__ : Node | None = None def A ( ) -> Node | None: A__ = Node(1 ) A__ = Node(2 ) A__ = Node(3 ) A__ = Node(4 ) A__ = Node(5 ) return tree def A ( __UpperCamelCase ) -> list[int]: return [root.data, *preorder(root.left ), *preorder(root.right )] if root else [] def A ( __UpperCamelCase ) -> list[int]: return postorder(root.left ) + postorder(root.right ) + [root.data] if root else [] def A ( __UpperCamelCase ) -> list[int]: return [*inorder(root.left ), root.data, *inorder(root.right )] if root else [] def A ( __UpperCamelCase ) -> int: return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0 def A ( __UpperCamelCase ) -> Sequence[Node | None]: A__ = [] if root is None: return output A__ = deque([root] ) while process_queue: A__ = process_queue.popleft() output.append(node.data ) if node.left: process_queue.append(node.left ) if node.right: process_queue.append(node.right ) return output def A ( __UpperCamelCase , __UpperCamelCase ) -> Sequence[Node | None]: A__ = [] def populate_output(__UpperCamelCase , __UpperCamelCase ) -> None: if not root: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.left , level - 1 ) populate_output(root.right , level - 1 ) populate_output(__UpperCamelCase , __UpperCamelCase ) return output def A ( __UpperCamelCase , __UpperCamelCase ) -> Sequence[Node | None]: A__ = [] def populate_output(__UpperCamelCase , __UpperCamelCase ) -> None: if root is None: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.right , level - 1 ) populate_output(root.left , level - 1 ) populate_output(__UpperCamelCase , __UpperCamelCase ) return output def A ( __UpperCamelCase ) -> Sequence[Node | None] | list[Any]: if root is None: return [] A__ = [] A__ = 0 A__ = height(__UpperCamelCase ) for h in range(1 , height_tree + 1 ): if not flag: output.append(get_nodes_from_left_to_right(__UpperCamelCase , __UpperCamelCase ) ) A__ = 1 else: output.append(get_nodes_from_right_to_left(__UpperCamelCase , __UpperCamelCase ) ) A__ = 0 return output def A ( ) -> None: # Main function for testing. A__ = make_tree() print(f'''In-order Traversal: {inorder(__UpperCamelCase )}''' ) print(f'''Pre-order Traversal: {preorder(__UpperCamelCase )}''' ) print(f'''Post-order Traversal: {postorder(__UpperCamelCase )}''' , '\n' ) print(f'''Height of Tree: {height(__UpperCamelCase )}''' , '\n' ) print('Complete Level Order Traversal: ' ) print(level_order(__UpperCamelCase ) , '\n' ) print('Level-wise order Traversal: ' ) for level in range(1 , height(__UpperCamelCase ) + 1 ): print(f'''Level {level}:''' , get_nodes_from_left_to_right(__UpperCamelCase , level=__UpperCamelCase ) ) print('\nZigZag order Traversal: ' ) print(zigzag(__UpperCamelCase ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
712
import pytest

import datasets


# Import fixture modules as plugins
SCREAMING_SNAKE_CASE__ = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']


def A ( __UpperCamelCase , __UpperCamelCase ) -> Optional[int]:
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ['integration', 'unit'] ):
            continue
        item.add_marker(pytest.mark.unit )


def A ( __UpperCamelCase ) -> str:
    config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' )


@pytest.fixture(autouse=__UpperCamelCase )
def A ( __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]:
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    A__ = tmp_path_factory.getbasetemp() / 'cache'
    A__ = test_hf_cache_home / 'datasets'
    A__ = test_hf_cache_home / 'metrics'
    A__ = test_hf_cache_home / 'modules'
    monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(__UpperCamelCase ) )
    monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(__UpperCamelCase ) )
    monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(__UpperCamelCase ) )
    A__ = test_hf_datasets_cache / 'downloads'
    monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(__UpperCamelCase ) )
    A__ = test_hf_datasets_cache / 'downloads' / 'extracted'
    monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(__UpperCamelCase ) )


@pytest.fixture(autouse=__UpperCamelCase , scope='session' )
def A ( ) -> Union[str, Any]:
    datasets.disable_progress_bar()


@pytest.fixture(autouse=__UpperCamelCase )
def A ( __UpperCamelCase ) -> int:
    # don't take tests into account when counting downloads
    monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , __UpperCamelCase )


@pytest.fixture
def A ( __UpperCamelCase ) -> Any:
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , __UpperCamelCase )
52
0
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" A__ : List[Any] = "naver-clova-ix/donut-base-finetuned-docvqa" A__ : Optional[Any] = ( "This is a tool that answers a question about an document (pdf). It takes an input named `document` which " "should be the document containing the information, as well as a `question` that is the question about the " "document. It returns a text that contains the answer to the question." ) A__ : Tuple = "document_qa" A__ : str = AutoProcessor A__ : Union[str, Any] = VisionEncoderDecoderModel A__ : int = ["image", "text"] A__ : Any = ["text"] def __init__( self : Optional[int] , *_snake_case : Tuple , **_snake_case : Tuple ): """simple docstring""" if not is_vision_available(): raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' ) super().__init__(*_snake_case , **_snake_case ) def _a ( self : Union[str, Any] , _snake_case : "Image" , _snake_case : str ): """simple docstring""" A__ = '<s_docvqa><s_question>{user_input}</s_question><s_answer>' A__ = task_prompt.replace('{user_input}' , _snake_case ) A__ = self.pre_processor.tokenizer( _snake_case , add_special_tokens=_snake_case , return_tensors='pt' ).input_ids A__ = self.pre_processor(_snake_case , return_tensors='pt' ).pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def _a ( self : int , _snake_case : Optional[Any] ): """simple docstring""" return self.model.generate( inputs['pixel_values'].to(self.device ) , decoder_input_ids=inputs['decoder_input_ids'].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=_snake_case , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=_snake_case , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=_snake_case , ).sequences def _a ( self : List[Any] , _snake_case : List[str] ): """simple docstring""" A__ = self.pre_processor.batch_decode(_snake_case )[0] A__ = sequence.replace(self.pre_processor.tokenizer.eos_token , '' ) A__ = sequence.replace(self.pre_processor.tokenizer.pad_token , '' ) A__ = re.sub(R'<.*?>' , '' , _snake_case , count=1 ).strip() # remove first task start token A__ = self.pre_processor.tokenajson(_snake_case ) return sequence["answer"]
713
import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def A ( __UpperCamelCase , __UpperCamelCase ) -> Tuple: A__ = args.log_outputs A__ = '_'.join(args.dataset.split('/' ) + [args.config, args.split] ) # load metric A__ = load_metric('wer' ) A__ = load_metric('cer' ) # compute metrics A__ = wer.compute(references=result['target'] , predictions=result['prediction'] ) A__ = cer.compute(references=result['target'] , predictions=result['prediction'] ) # print & log results A__ = f'''WER: {wer_result}\nCER: {cer_result}''' print(__UpperCamelCase ) with open(f'''{dataset_id}_eval_results.txt''' , 'w' ) as f: f.write(__UpperCamelCase ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: A__ = f'''log_{dataset_id}_predictions.txt''' A__ = f'''log_{dataset_id}_targets.txt''' with open(__UpperCamelCase , 'w' ) as p, open(__UpperCamelCase , 'w' ) as t: # mapping function to write output def write_to_file(__UpperCamelCase , __UpperCamelCase ): p.write(f'''{i}''' + '\n' ) p.write(batch['prediction'] + '\n' ) t.write(f'''{i}''' + '\n' ) t.write(batch['target'] + '\n' ) result.map(__UpperCamelCase , with_indices=__UpperCamelCase ) def A ( __UpperCamelCase ) -> str: A__ = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training A__ = re.sub(__UpperCamelCase , '' , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! A__ = ['\n\n', '\n', ' ', ' '] for t in token_sequences_to_ignore: A__ = ' '.join(text.split(__UpperCamelCase ) ) return text def A ( __UpperCamelCase ) -> Union[str, Any]: # load dataset A__ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=__UpperCamelCase ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor A__ = AutoFeatureExtractor.from_pretrained(args.model_id ) A__ = feature_extractor.sampling_rate # resample audio A__ = dataset.cast_column('audio' , Audio(sampling_rate=__UpperCamelCase ) ) # load eval pipeline if args.device is None: A__ = 0 if torch.cuda.is_available() else -1 A__ = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(__UpperCamelCase ): A__ = asr( batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) A__ = prediction['text'] A__ = normalize_text(batch['sentence'] ) return batch # run inference on all examples A__ = dataset.map(__UpperCamelCase , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(__UpperCamelCase , __UpperCamelCase ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() parser.add_argument( '''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers''' ) parser.add_argument( '''--dataset''', type=str, required=True, help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''', ) parser.add_argument( '''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice''' ) parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. 
*E.g.* `\'test\'`''') parser.add_argument( '''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.''' ) parser.add_argument( '''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.''' ) parser.add_argument( '''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.''' ) parser.add_argument( '''--device''', type=int, default=None, help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''', ) SCREAMING_SNAKE_CASE__ = parser.parse_args() main(args)
52
0
from __future__ import annotations


def A ( __UpperCamelCase ) -> float:
    if not nums:
        raise ValueError('List is empty' )
    return sum(__UpperCamelCase ) / len(__UpperCamelCase )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
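A short worked example of the same arithmetic-mean computation, written with plain names since the masked identifiers above are not directly importable:

# mean = sum of the values divided by their count; empty input is an error
values = [3, 6, 9, 12, 15, 18, 21]
if not values:
    raise ValueError('List is empty')
print(sum(values) / len(values))  # 12.0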
714
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) def A ( __UpperCamelCase ) -> YolosConfig: A__ = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: A__ = 192 A__ = 768 A__ = 12 A__ = 3 A__ = [800, 1_333] A__ = False elif yolos_name == "yolos_s_dWr": A__ = 330 A__ = 14 A__ = 6 A__ = 1_320 elif "yolos_s" in yolos_name: A__ = 384 A__ = 1_536 A__ = 12 A__ = 6 elif "yolos_b" in yolos_name: A__ = [800, 1_344] A__ = 91 A__ = 'huggingface/label-files' A__ = 'coco-detection-id2label.json' A__ = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='dataset' ) , 'r' ) ) A__ = {int(__UpperCamelCase ): v for k, v in idalabel.items()} A__ = idalabel A__ = {v: k for k, v in idalabel.items()} return config def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False ) -> str: for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) A__ = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) A__ = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict A__ = in_proj_weight[: config.hidden_size, :] A__ = in_proj_bias[: config.hidden_size] A__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] A__ = in_proj_weight[-config.hidden_size :, :] A__ = in_proj_bias[-config.hidden_size :] def A ( __UpperCamelCase ) -> str: if "backbone" in name: A__ = name.replace('backbone' , 'vit' ) if "cls_token" in name: A__ = name.replace('cls_token' , 'embeddings.cls_token' ) if "det_token" in name: A__ = name.replace('det_token' , 'embeddings.detection_tokens' ) if "mid_pos_embed" in name: A__ = name.replace('mid_pos_embed' , 'encoder.mid_position_embeddings' ) if "pos_embed" in name: A__ = name.replace('pos_embed' , 'embeddings.position_embeddings' ) if "patch_embed.proj" in name: A__ = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "blocks" in name: A__ = name.replace('blocks' , 'encoder.layer' ) if "attn.proj" in name: A__ = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: A__ = name.replace('attn' , 'attention.self' ) if "norm1" in name: A__ = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: A__ = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: A__ = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: A__ = name.replace('mlp.fc2' , 'output.dense' ) if "class_embed" in name: A__ = name.replace('class_embed' , 'class_labels_classifier' ) if "bbox_embed" in name: A__ = name.replace('bbox_embed' , 'bbox_predictor' ) if "vit.norm" in name: A__ = name.replace('vit.norm' , 'vit.layernorm' ) return name def A ( __UpperCamelCase , __UpperCamelCase ) -> dict: for key in orig_state_dict.copy().keys(): A__ = orig_state_dict.pop(__UpperCamelCase ) if "qkv" in key: A__ = key.split('.' 
) A__ = int(key_split[2] ) A__ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: A__ = val[:dim, :] A__ = val[ dim : dim * 2, : ] A__ = val[-dim:, :] else: A__ = val[:dim] A__ = val[dim : dim * 2] A__ = val[-dim:] else: A__ = val return orig_state_dict def A ( ) -> torch.Tensor: A__ = 'http://images.cocodataset.org/val2017/000000039769.jpg' A__ = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw ) return im @torch.no_grad() def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False ) -> List[str]: A__ = get_yolos_config(__UpperCamelCase ) # load original state_dict A__ = torch.load(__UpperCamelCase , map_location='cpu' )['model'] # load 🤗 model A__ = YolosForObjectDetection(__UpperCamelCase ) model.eval() A__ = convert_state_dict(__UpperCamelCase , __UpperCamelCase ) model.load_state_dict(__UpperCamelCase ) # Check outputs on an image, prepared by YolosImageProcessor A__ = 800 if yolos_name != 'yolos_ti' else 512 A__ = YolosImageProcessor(format='coco_detection' , size=__UpperCamelCase ) A__ = image_processor(images=prepare_img() , return_tensors='pt' ) A__ = model(**__UpperCamelCase ) A__ , A__ = outputs.logits, outputs.pred_boxes A__ , A__ = None, None if yolos_name == "yolos_ti": A__ = torch.tensor( [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] ) A__ = torch.tensor( [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] ) elif yolos_name == "yolos_s_200_pre": A__ = torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] ) A__ = torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] ) elif yolos_name == "yolos_s_300_pre": A__ = torch.tensor( [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] ) A__ = torch.tensor( [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] ) elif yolos_name == "yolos_s_dWr": A__ = torch.tensor( [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] ) A__ = torch.tensor( [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] ) elif yolos_name == "yolos_base": A__ = torch.tensor( [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] ) A__ = torch.tensor( [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] ) else: raise ValueError(f'''Unknown yolos_name: {yolos_name}''' ) assert torch.allclose(logits[0, :3, :3] , __UpperCamelCase , atol=1E-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , __UpperCamelCase , atol=1E-4 ) Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase ) print(f'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__UpperCamelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__UpperCamelCase ) if push_to_hub: A__ = { 'yolos_ti': 'yolos-tiny', 'yolos_s_200_pre': 'yolos-small', 'yolos_s_300_pre': 'yolos-small-300', 'yolos_s_dWr': 'yolos-small-dwr', 'yolos_base': 'yolos-base', } print('Pushing to the hub...' 
) A__ = model_mapping[yolos_name] image_processor.push_to_hub(__UpperCamelCase , organization='hustvl' ) model.push_to_hub(__UpperCamelCase , organization='hustvl' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--yolos_name''', default='''yolos_s_200_pre''', type=str, help=( '''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\',''' ''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.''' ), ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) SCREAMING_SNAKE_CASE__ = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
52
0
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available SCREAMING_SNAKE_CASE__ = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ '''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MraForMaskedLM''', '''MraForMultipleChoice''', '''MraForQuestionAnswering''', '''MraForSequenceClassification''', '''MraForTokenClassification''', '''MraLayer''', '''MraModel''', '''MraPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mra import ( MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraLayer, MraModel, MraPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
715
from typing import TYPE_CHECKING

from ..utils import _LazyModule


SCREAMING_SNAKE_CASE__ = {
    '''config''': [
        '''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
        '''OnnxConfig''',
        '''OnnxConfigWithPast''',
        '''OnnxSeq2SeqConfigWithPast''',
        '''PatchingSpec''',
    ],
    '''convert''': ['''export''', '''validate_model_outputs'''],
    '''features''': ['''FeaturesManager'''],
    '''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}

if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeqaSeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size

else:
    import sys

    SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
52
0
from __future__ import annotations

from random import choice


def A ( __UpperCamelCase ) -> Optional[Any]:
    return choice(__UpperCamelCase )


def A ( __UpperCamelCase , __UpperCamelCase ) -> int:
    A__ = random_pivot(__UpperCamelCase )

    # partition based on pivot
    # linear time
    A__ = [e for e in lst if e < pivot]
    A__ = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(__UpperCamelCase ) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(__UpperCamelCase ) < k - 1:
        return kth_number(__UpperCamelCase , k - len(__UpperCamelCase ) - 1 )
    # pivot is in elements smaller than k
    else:
        return kth_number(__UpperCamelCase , __UpperCamelCase )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
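A hedged re-statement of the quickselect routine above with readable names (it assumes distinct elements, since values equal to the pivot are dropped, exactly as in the original):

# Quickselect sketch: find the k-th smallest element (1-indexed), expected linear time.
from random import choice


def kth_number(lst, k):
    pivot = choice(lst)
    small = [e for e in lst if e < pivot]   # elements strictly below the pivot
    big = [e for e in lst if e > pivot]     # elements strictly above the pivot
    if len(small) == k - 1:
        return pivot                        # pivot is exactly the k-th smallest
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)  # answer lies above the pivot
    else:
        return kth_number(small, k)         # answer lies below the pivot


print(kth_number([2, 1, 3, 4, 5], 3))  # 3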
716
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''} SCREAMING_SNAKE_CASE__ = { '''vocab_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''', }, '''tokenizer_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''', }, } SCREAMING_SNAKE_CASE__ = { '''google/rembert''': 2_5_6, } SCREAMING_SNAKE_CASE__ = '''▁''' class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" A__ : Any = VOCAB_FILES_NAMES A__ : str = PRETRAINED_VOCAB_FILES_MAP A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ : int = RemBertTokenizer def __init__( self : Union[str, Any] , _snake_case : Any=None , _snake_case : Optional[Any]=None , _snake_case : Any=True , _snake_case : Optional[int]=True , _snake_case : Dict=False , _snake_case : Dict="[CLS]" , _snake_case : List[Any]="[SEP]" , _snake_case : Union[str, Any]="<unk>" , _snake_case : List[str]="[SEP]" , _snake_case : List[str]="<pad>" , _snake_case : str="[CLS]" , _snake_case : Any="[MASK]" , **_snake_case : Any , ): """simple docstring""" A__ = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else mask_token super().__init__( _snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , remove_space=_snake_case , keep_accents=_snake_case , bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , **_snake_case , ) A__ = do_lower_case A__ = remove_space A__ = keep_accents A__ = vocab_file A__ = False if not self.vocab_file else True def _a ( self : Any , _snake_case : List[int] , _snake_case : Optional[List[int]] = None ): """simple docstring""" A__ = [self.sep_token_id] A__ = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _a ( self : Tuple , _snake_case : List[int] , _snake_case : Optional[List[int]] = None , _snake_case : bool = False ): """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.' 
) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(_snake_case )) + [1] + ([0] * len(_snake_case )) + [1] return [1] + ([0] * len(_snake_case )) + [1] def _a ( self : Dict , _snake_case : List[int] , _snake_case : Optional[List[int]] = None ): """simple docstring""" A__ = [self.sep_token_id] A__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _a ( self : Any , _snake_case : str , _snake_case : Optional[str] = None ): """simple docstring""" if not os.path.isdir(_snake_case ): logger.error('Vocabulary path ({}) should be a directory'.format(_snake_case ) ) return A__ = os.path.join( _snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ): copyfile(self.vocab_file , _snake_case ) return (out_vocab_file,)
52
0
from typing import Dict

from .base import GenericTensor, Pipeline


class __lowerCAmelCase ( UpperCAmelCase_ ):
    """simple docstring"""

    def _a ( self : Any , _snake_case : str=None , _snake_case : Dict=None , _snake_case : Any=None , **_snake_case : str ):
        """simple docstring"""
        if tokenize_kwargs is None:
            A__ = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' )
            A__ = truncation
        A__ = tokenize_kwargs

        A__ = {}
        if return_tensors is not None:
            A__ = return_tensors

        return preprocess_params, {}, postprocess_params

    def _a ( self : Any , _snake_case : Dict , **_snake_case : Optional[Any] ):
        """simple docstring"""
        A__ = self.framework
        A__ = self.tokenizer(_snake_case , return_tensors=_snake_case , **_snake_case )
        return model_inputs

    def _a ( self : List[Any] , _snake_case : Dict ):
        """simple docstring"""
        A__ = self.model(**_snake_case )
        return model_outputs

    def _a ( self : Optional[Any] , _snake_case : List[Any] , _snake_case : str=False ):
        """simple docstring"""
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__( self : Dict , *_snake_case : int , **_snake_case : List[str] ):
        """simple docstring"""
        return super().__call__(*_snake_case , **_snake_case )
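A minimal usage sketch of the feature-extraction pipeline this class backs (the model id is illustrative and needs a network connection or cached download):

from transformers import pipeline

extractor = pipeline('feature-extraction', model='distilbert-base-uncased')
features = extractor('This is a test')  # nested list: [tokens x hidden_size]
print(len(features[0]), len(features[0][0]))  # token count, hidden size (768 for this model)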
717
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch


SCREAMING_SNAKE_CASE__ = '''sshleifer/bart-tiny-random'''
SCREAMING_SNAKE_CASE__ = '''patrickvonplaten/t5-tiny-random'''


@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
    """simple docstring"""

    @cached_property
    def _a ( self : Optional[int] ):
        """simple docstring"""
        return AutoConfig.from_pretrained(_snake_case )

    def _a ( self : Optional[Any] ):
        """simple docstring"""
        A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.num_hidden_layers , 1 )

    def _a ( self : Optional[int] ):
        """simple docstring"""
        A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=_snake_case )

    def _a ( self : int ):
        """simple docstring"""
        A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=_snake_case )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )

    def _a ( self : str ):
        """simple docstring"""
        A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , 1 )

    def _a ( self : str ):
        """simple docstring"""
        with self.assertRaises(_snake_case ):
            create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=_snake_case , d=_snake_case )
52
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)

SCREAMING_SNAKE_CASE__ = {
    '''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}


class __lowerCAmelCase ( UpperCAmelCase_ ):
    """simple docstring"""

    A__ : List[Any] = "transfo-xl"
    A__ : Optional[int] = ["mems"]
    A__ : Dict = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__( self : Optional[Any] , _snake_case : Tuple=26_77_35 , _snake_case : Union[str, Any]=[2_00_00, 4_00_00, 20_00_00] , _snake_case : int=10_24 , _snake_case : int=10_24 , _snake_case : int=16 , _snake_case : Any=64 , _snake_case : int=40_96 , _snake_case : Dict=4 , _snake_case : int=False , _snake_case : Optional[int]=18 , _snake_case : List[str]=16_00 , _snake_case : Optional[Any]=10_00 , _snake_case : Union[str, Any]=True , _snake_case : List[Any]=True , _snake_case : Optional[Any]=0 , _snake_case : Union[str, Any]=-1 , _snake_case : Tuple=True , _snake_case : Tuple=0.1 , _snake_case : Union[str, Any]=0.0 , _snake_case : str=True , _snake_case : Union[str, Any]="normal" , _snake_case : Optional[Any]=0.01 , _snake_case : List[str]=0.01 , _snake_case : Union[str, Any]=0.02 , _snake_case : Optional[int]=1E-5 , _snake_case : int=0 , **_snake_case : Union[str, Any] , ):
        """simple docstring"""
        A__ = vocab_size
        A__ = []
        self.cutoffs.extend(_snake_case )
        if proj_share_all_but_first:
            A__ = [False] + [True] * len(self.cutoffs )
        else:
            A__ = [False] + [False] * len(self.cutoffs )
        A__ = d_model
        A__ = d_embed
        A__ = d_head
        A__ = d_inner
        A__ = div_val
        A__ = pre_lnorm
        A__ = n_layer
        A__ = n_head
        A__ = mem_len
        A__ = same_length
        A__ = attn_type
        A__ = clamp_len
        A__ = sample_softmax
        A__ = adaptive
        A__ = dropout
        A__ = dropatt
        A__ = untie_r
        A__ = init
        A__ = init_range
        A__ = proj_init_std
        A__ = init_std
        A__ = layer_norm_epsilon
        super().__init__(eos_token_id=_snake_case , **_snake_case )

    @property
    def _a ( self : Optional[Any] ):
        """simple docstring"""
        logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
        return -1

    @max_position_embeddings.setter
    def _a ( self : Any , _snake_case : Any ):
        """simple docstring"""
        raise NotImplementedError(
            F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
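A minimal instantiation sketch (assuming the class above is transformers' TransfoXLConfig; the attribute map shown lets the library's canonical names resolve to the Transformer-XL ones):

from transformers import TransfoXLConfig

config = TransfoXLConfig(d_model=512, n_layer=6, n_head=8)
print(config.hidden_size)        # 512, aliased to d_model via the attribute map
print(config.num_hidden_layers)  # 6, aliased to n_layer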
718
from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" A__ : Union[str, Any] = ["image_processor", "tokenizer"] A__ : Optional[Any] = "BridgeTowerImageProcessor" A__ : List[Any] = ("RobertaTokenizer", "RobertaTokenizerFast") def __init__( self : List[Any] , _snake_case : Optional[Any] , _snake_case : Optional[int] ): """simple docstring""" super().__init__(_snake_case , _snake_case ) def __call__( self : List[Any] , _snake_case : int , _snake_case : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _snake_case : bool = True , _snake_case : Union[bool, str, PaddingStrategy] = False , _snake_case : Union[bool, str, TruncationStrategy] = None , _snake_case : Optional[int] = None , _snake_case : int = 0 , _snake_case : Optional[int] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[bool] = None , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = True , _snake_case : Optional[Union[str, TensorType]] = None , **_snake_case : Optional[int] , ): """simple docstring""" A__ = self.tokenizer( text=_snake_case , add_special_tokens=_snake_case , padding=_snake_case , truncation=_snake_case , max_length=_snake_case , stride=_snake_case , pad_to_multiple_of=_snake_case , return_token_type_ids=_snake_case , return_attention_mask=_snake_case , return_overflowing_tokens=_snake_case , return_special_tokens_mask=_snake_case , return_offsets_mapping=_snake_case , return_length=_snake_case , verbose=_snake_case , return_tensors=_snake_case , **_snake_case , ) # add pixel_values + pixel_mask A__ = self.image_processor( _snake_case , return_tensors=_snake_case , do_normalize=_snake_case , do_center_crop=_snake_case , **_snake_case ) encoding.update(_snake_case ) return encoding def _a ( self : Any , *_snake_case : Tuple , **_snake_case : List[Any] ): """simple docstring""" return self.tokenizer.batch_decode(*_snake_case , **_snake_case ) def _a ( self : Dict , *_snake_case : Dict , **_snake_case : List[str] ): """simple docstring""" return self.tokenizer.decode(*_snake_case , **_snake_case ) @property def _a ( self : Tuple ): """simple docstring""" A__ = self.tokenizer.model_input_names A__ = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
'''simple docstring''' import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger SCREAMING_SNAKE_CASE__ = get_logger(__name__) SCREAMING_SNAKE_CASE__ = Path(__file__).parent / '''model_card_template.md''' SCREAMING_SNAKE_CASE__ = uuida().hex SCREAMING_SNAKE_CASE__ = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES SCREAMING_SNAKE_CASE__ = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES SCREAMING_SNAKE_CASE__ = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/''' def A ( __UpperCamelCase = None ) -> str: A__ = f'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}''' if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f'''; torch/{_torch_version}''' if is_flax_available(): ua += f'''; jax/{_jax_version}''' ua += f'''; flax/{_flax_version}''' if is_onnx_available(): ua += f'''; onnxruntime/{_onnxruntime_version}''' # CI will set this value to True if os.environ.get('DIFFUSERS_IS_CI' , '' ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(__UpperCamelCase , __UpperCamelCase ): ua += "; " + "; ".join(f'''{k}/{v}''' for k, v in user_agent.items() ) elif isinstance(__UpperCamelCase , __UpperCamelCase ): ua += "; " + user_agent return ua def A ( __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None ) -> Optional[int]: if token is None: A__ = HfFolder.get_token() if organization is None: A__ = whoami(__UpperCamelCase )['name'] return f'''{username}/{model_id}''' else: return f'''{organization}/{model_id}''' def A ( __UpperCamelCase , __UpperCamelCase ) -> Dict: if not is_jinja_available(): raise ValueError( 'Modelcard rendering is based on Jinja templates.' ' Please make sure to have `jinja` installed before using `create_model_card`.' ' To install it, please run `pip install Jinja2`.' 
) if hasattr(__UpperCamelCase , 'local_rank' ) and args.local_rank not in [-1, 0]: return A__ = args.hub_token if hasattr(__UpperCamelCase , 'hub_token' ) else None A__ = get_full_repo_name(__UpperCamelCase , token=__UpperCamelCase ) A__ = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language='en' , license='apache-2.0' , library_name='diffusers' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=__UpperCamelCase , model_name=__UpperCamelCase , repo_name=__UpperCamelCase , dataset_name=args.dataset_name if hasattr(__UpperCamelCase , 'dataset_name' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(__UpperCamelCase , 'gradient_accumulation_steps' ) else None ) , adam_beta1=args.adam_beta1 if hasattr(__UpperCamelCase , 'adam_beta1' ) else None , adam_beta2=args.adam_beta2 if hasattr(__UpperCamelCase , 'adam_beta2' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(__UpperCamelCase , 'adam_weight_decay' ) else None , adam_epsilon=args.adam_epsilon if hasattr(__UpperCamelCase , 'adam_epsilon' ) else None , lr_scheduler=args.lr_scheduler if hasattr(__UpperCamelCase , 'lr_scheduler' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(__UpperCamelCase , 'lr_warmup_steps' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(__UpperCamelCase , 'ema_inv_gamma' ) else None , ema_power=args.ema_power if hasattr(__UpperCamelCase , 'ema_power' ) else None , ema_max_decay=args.ema_max_decay if hasattr(__UpperCamelCase , 'ema_max_decay' ) else None , mixed_precision=args.mixed_precision , ) A__ = os.path.join(args.output_dir , 'README.md' ) model_card.save(__UpperCamelCase ) def A ( __UpperCamelCase , __UpperCamelCase = None ) -> Optional[int]: if resolved_file is None or commit_hash is not None: return commit_hash A__ = str(Path(__UpperCamelCase ).as_posix() ) A__ = re.search(r'snapshots/([^/]+)/' , __UpperCamelCase ) if search is None: return None A__ = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(__UpperCamelCase ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. SCREAMING_SNAKE_CASE__ = os.path.expanduser( os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface''')) ) SCREAMING_SNAKE_CASE__ = os.path.join(hf_cache_home, '''diffusers''') def A ( __UpperCamelCase = None , __UpperCamelCase = None ) -> None: if new_cache_dir is None: A__ = DIFFUSERS_CACHE if old_cache_dir is None: A__ = old_diffusers_cache A__ = Path(__UpperCamelCase ).expanduser() A__ = Path(__UpperCamelCase ).expanduser() for old_blob_path in old_cache_dir.glob('**/blobs/*' ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): A__ = new_cache_dir / old_blob_path.relative_to(__UpperCamelCase ) new_blob_path.parent.mkdir(parents=__UpperCamelCase , exist_ok=__UpperCamelCase ) os.replace(__UpperCamelCase , __UpperCamelCase ) try: os.symlink(__UpperCamelCase , __UpperCamelCase ) except OSError: logger.warning( 'Could not create symlink between old cache and new cache. 
If you use an older version of diffusers again, files will be re-downloaded.' ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). SCREAMING_SNAKE_CASE__ = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''') if not os.path.isfile(cache_version_file): SCREAMING_SNAKE_CASE__ = 0 else: with open(cache_version_file) as f: try: SCREAMING_SNAKE_CASE__ = int(f.read()) except ValueError: SCREAMING_SNAKE_CASE__ = 0 if cache_version < 1: SCREAMING_SNAKE_CASE__ = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( '''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your ''' '''existing cached models. This is a one-time operation, you can interrupt it or run it ''' '''later by calling `diffusers.utils.hub_utils.move_cache()`.''' ) try: move_cache() except Exception as e: SCREAMING_SNAKE_CASE__ = '''\n'''.join(traceback.format_tb(e.__traceback__)) logger.error( f'There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease ' '''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole ''' '''message and we will do our best to help.''' ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, '''w''') as f: f.write('''1''') except Exception: logger.warning( f'There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure ' '''the directory exists and can be written to.''' ) def A ( __UpperCamelCase , __UpperCamelCase = None ) -> str: if variant is not None: A__ = weights_name.split('.' ) A__ = splits[:-1] + [variant] + splits[-1:] A__ = '.'.join(__UpperCamelCase ) return weights_name def A ( __UpperCamelCase , *, __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , ) -> str: A__ = str(__UpperCamelCase ) if os.path.isfile(__UpperCamelCase ): return pretrained_model_name_or_path elif os.path.isdir(__UpperCamelCase ): if os.path.isfile(os.path.join(__UpperCamelCase , __UpperCamelCase ) ): # Load from a PyTorch checkpoint A__ = os.path.join(__UpperCamelCase , __UpperCamelCase ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) ): A__ = os.path.join(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) return model_file else: raise EnvironmentError( f'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' ) else: # 1. First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(__UpperCamelCase ).base_version ) >= version.parse('0.20.0' ) ): try: A__ = hf_hub_download( __UpperCamelCase , filename=_add_variant(__UpperCamelCase , __UpperCamelCase ) , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , proxies=__UpperCamelCase , resume_download=__UpperCamelCase , local_files_only=__UpperCamelCase , use_auth_token=__UpperCamelCase , user_agent=__UpperCamelCase , subfolder=__UpperCamelCase , revision=revision or commit_hash , ) warnings.warn( f'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. 
Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' , __UpperCamelCase , ) return model_file except: # noqa: E722 warnings.warn( f'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__UpperCamelCase , __UpperCamelCase )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(__UpperCamelCase , __UpperCamelCase )}\' so that the correct variant file can be added.''' , __UpperCamelCase , ) try: # 2. Load model file as usual A__ = hf_hub_download( __UpperCamelCase , filename=__UpperCamelCase , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , proxies=__UpperCamelCase , resume_download=__UpperCamelCase , local_files_only=__UpperCamelCase , use_auth_token=__UpperCamelCase , user_agent=__UpperCamelCase , subfolder=__UpperCamelCase , revision=revision or commit_hash , ) return model_file except RepositoryNotFoundError: raise EnvironmentError( f'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier ''' 'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a ' 'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli ' 'login`.' ) except RevisionNotFoundError: raise EnvironmentError( f'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for ''' 'this model name. Check the model page at ' f'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' ) except EntryNotFoundError: raise EnvironmentError( f'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' ) except HTTPError as err: raise EnvironmentError( f'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' ) except ValueError: raise EnvironmentError( f'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it''' f''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a''' f''' directory containing a file named {weights_name} or''' ' \nCheckout your internet connection or see how to run the library in' ' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' ) except EnvironmentError: raise EnvironmentError( f'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from ''' '\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. ' f'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory ''' f'''containing a file named {weights_name}''' )
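# The variant-renaming helper near the top of this file (_add_variant in the unmasked
# upstream) is small enough to verify in isolation; a self-contained re-implementation
# with hypothetical names behaves like this:
from typing import Optional

def add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    # Insert the variant tag just before the file extension.
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name

print(add_variant("diffusion_pytorch_model.bin", "fp16"))  # diffusion_pytorch_model.fp16.bin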
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE__ = { '''configuration_xlm_roberta''': [ '''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMRobertaConfig''', '''XLMRobertaOnnxConfig''', ], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = ['''XLMRobertaTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = ['''XLMRobertaTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ '''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XLMRobertaForCausalLM''', '''XLMRobertaForMaskedLM''', '''XLMRobertaForMultipleChoice''', '''XLMRobertaForQuestionAnswering''', '''XLMRobertaForSequenceClassification''', '''XLMRobertaForTokenClassification''', '''XLMRobertaModel''', '''XLMRobertaPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ '''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXLMRobertaForCausalLM''', '''TFXLMRobertaForMaskedLM''', '''TFXLMRobertaForMultipleChoice''', '''TFXLMRobertaForQuestionAnswering''', '''TFXLMRobertaForSequenceClassification''', '''TFXLMRobertaForTokenClassification''', '''TFXLMRobertaModel''', '''TFXLMRobertaPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ '''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FlaxXLMRobertaForMaskedLM''', '''FlaxXLMRobertaForCausalLM''', '''FlaxXLMRobertaForMultipleChoice''', '''FlaxXLMRobertaForQuestionAnswering''', '''FlaxXLMRobertaForSequenceClassification''', '''FlaxXLMRobertaForTokenClassification''', '''FlaxXLMRobertaModel''', '''FlaxXLMRobertaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig, XLMRobertaOnnxConfig, ) try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta import XLMRobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, XLMRobertaForQuestionAnswering, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, XLMRobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm_roberta import ( TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMRobertaForCausalLM, TFXLMRobertaForMaskedLM, TFXLMRobertaForMultipleChoice, TFXLMRobertaForQuestionAnswering, TFXLMRobertaForSequenceClassification, 
TFXLMRobertaForTokenClassification, TFXLMRobertaModel, TFXLMRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xlm_roberta import ( FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxXLMRobertaForCausalLM, FlaxXLMRobertaForMaskedLM, FlaxXLMRobertaForMultipleChoice, FlaxXLMRobertaForQuestionAnswering, FlaxXLMRobertaForSequenceClassification, FlaxXLMRobertaForTokenClassification, FlaxXLMRobertaModel, FlaxXLMRobertaPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
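# The _LazyModule wiring above defers heavy framework imports until an attribute is
# first touched. A minimal sketch of the same idea using PEP 562's module-level
# __getattr__ (a simplification, not the actual transformers implementation):
import importlib

_import_structure = {"json": ["dumps", "loads"]}  # submodule -> exported names

def __getattr__(name):
    for module_name, exported in _import_structure.items():
        if name in exported:
            # Import the submodule only now, on first access.
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")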
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of `value`, or the derivative form when `deriv` is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Return the value found after the forward propagation training."""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
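# Quick sanity check of the repaired module: the output drifts toward the expected
# value as the number of propagations grows. The run is stochastic, so only a rough
# band is checked here.
random.seed(0)
result = forward_propagation(32, 450_000)
print(31 < result < 33)  # expected: True for a long enough run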
import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def A ( __UpperCamelCase ) -> Tuple: if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[Any]: return max(metric_fn(__UpperCamelCase , __UpperCamelCase ) for gt in ground_truths ) def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[str]: A__ = [line.strip() for line in open(__UpperCamelCase , 'r' ).readlines()] A__ = [] if args.gold_data_mode == "qa": A__ = pd.read_csv(__UpperCamelCase , sep='\t' , header=__UpperCamelCase ) for answer_list in data[1]: A__ = ast.literal_eval(__UpperCamelCase ) answers.append(__UpperCamelCase ) else: A__ = [line.strip() for line in open(__UpperCamelCase , 'r' ).readlines()] A__ = [[reference] for reference in references] A__ = A__ = A__ = 0 for prediction, ground_truths in zip(__UpperCamelCase , __UpperCamelCase ): total += 1 em += metric_max_over_ground_truths(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) fa += metric_max_over_ground_truths(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) A__ = 100.0 * em / total A__ = 100.0 * fa / total logger.info(f'''F1: {fa:.2f}''' ) logger.info(f'''EM: {em:.2f}''' ) def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[int]: A__ = args.k A__ = [line.strip() for line in open(__UpperCamelCase , 'r' ).readlines()] A__ = [line.strip() for line in open(__UpperCamelCase , 'r' ).readlines()] A__ = A__ = 0 for hypo, reference in zip(__UpperCamelCase , __UpperCamelCase ): A__ = set(hypo.split('\t' )[:k] ) A__ = set(reference.split('\t' ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k A__ = 100.0 * em / total logger.info(f'''Precision@{k}: {em: .2f}''' ) def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]: def strip_title(__UpperCamelCase ): if title.startswith('"' ): A__ = title[1:] if title.endswith('"' ): A__ = title[:-1] return title A__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __UpperCamelCase , return_tensors='pt' , padding=__UpperCamelCase , truncation=__UpperCamelCase , )['input_ids'].to(args.device ) A__ = rag_model.rag.question_encoder(__UpperCamelCase ) A__ = question_enc_outputs[0] A__ = rag_model.retriever( __UpperCamelCase , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='pt' , ) A__ = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) A__ = [] for docs in all_docs: A__ = [strip_title(__UpperCamelCase ) for title in docs['title']] provenance_strings.append('\t'.join(__UpperCamelCase ) ) return provenance_strings def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]: with torch.no_grad(): A__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __UpperCamelCase , 
return_tensors='pt' , padding=__UpperCamelCase , truncation=__UpperCamelCase ) A__ = inputs_dict.input_ids.to(args.device ) A__ = inputs_dict.attention_mask.to(args.device ) A__ = rag_model.generate( # rag_model overwrites generate __UpperCamelCase , attention_mask=__UpperCamelCase , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__UpperCamelCase , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) A__ = rag_model.retriever.generator_tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase ) if args.print_predictions: for q, a in zip(__UpperCamelCase , __UpperCamelCase ): logger.info('Q: {} - A: {}'.format(__UpperCamelCase , __UpperCamelCase ) ) return answers def A ( ) -> Any: A__ = argparse.ArgumentParser() parser.add_argument( '--model_type' , choices=['rag_sequence', 'rag_token', 'bart'] , type=__UpperCamelCase , help=( 'RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the' ' model_name_or_path' ) , ) parser.add_argument( '--index_name' , default=__UpperCamelCase , choices=['exact', 'compressed', 'legacy'] , type=__UpperCamelCase , help='RAG model retriever type' , ) parser.add_argument( '--index_path' , default=__UpperCamelCase , type=__UpperCamelCase , help='Path to the retrieval index' , ) parser.add_argument('--n_docs' , default=5 , type=__UpperCamelCase , help='Number of retrieved docs' ) parser.add_argument( '--model_name_or_path' , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help='Path to pretrained checkpoints or model identifier from huggingface.co/models' , ) parser.add_argument( '--eval_mode' , choices=['e2e', 'retrieval'] , default='e2e' , type=__UpperCamelCase , help=( 'Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates' ' precision@k.' ) , ) parser.add_argument('--k' , default=1 , type=__UpperCamelCase , help='k for the precision@k calculation' ) parser.add_argument( '--evaluation_set' , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help='Path to a file containing evaluation samples' , ) parser.add_argument( '--gold_data_path' , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help='Path to a tab-separated file with gold samples' , ) parser.add_argument( '--gold_data_mode' , default='qa' , type=__UpperCamelCase , choices=['qa', 'ans'] , help=( 'Format of the gold data file' 'qa - a single line in the following format: question [tab] answer_list' 'ans - a single line of the gold file contains the expected answer string' ) , ) parser.add_argument( '--predictions_path' , type=__UpperCamelCase , default='predictions.txt' , help='Name of the predictions file, to be stored in the checkpoints directory' , ) parser.add_argument( '--eval_all_checkpoints' , action='store_true' , help='Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number' , ) parser.add_argument( '--eval_batch_size' , default=8 , type=__UpperCamelCase , help='Batch size per GPU/CPU for evaluation.' 
, ) parser.add_argument( '--recalculate' , help='Recalculate predictions even if the prediction file exists' , action='store_true' , ) parser.add_argument( '--num_beams' , default=4 , type=__UpperCamelCase , help='Number of beams to be used when generating answers' , ) parser.add_argument('--min_length' , default=1 , type=__UpperCamelCase , help='Min length of the generated answers' ) parser.add_argument('--max_length' , default=50 , type=__UpperCamelCase , help='Max length of the generated answers' ) parser.add_argument( '--print_predictions' , action='store_true' , help='If True, prints predictions while evaluating.' , ) parser.add_argument( '--print_docs' , action='store_true' , help='If True, prints docs retrieved while generating.' , ) A__ = parser.parse_args() A__ = torch.device('cuda' if torch.cuda.is_available() else 'cpu' ) return args def A ( __UpperCamelCase ) -> int: A__ = {} if args.model_type is None: A__ = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith('rag' ): A__ = RagTokenForGeneration if args.model_type == 'rag_token' else RagSequenceForGeneration A__ = args.n_docs if args.index_name is not None: A__ = args.index_name if args.index_path is not None: A__ = args.index_path else: A__ = BartForConditionalGeneration A__ = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info('Evaluate the following checkpoints: %s' , __UpperCamelCase ) A__ = get_scores if args.eval_mode == 'e2e' else get_precision_at_k A__ = evaluate_batch_eae if args.eval_mode == 'e2e' else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path ) ) score_fn(__UpperCamelCase , args.predictions_path , args.gold_data_path ) continue logger.info('***** Running evaluation for {} *****'.format(__UpperCamelCase ) ) logger.info(' Batch size = %d' , args.eval_batch_size ) logger.info(' Predictions will be stored under {}'.format(args.predictions_path ) ) if args.model_type.startswith('rag' ): A__ = RagRetriever.from_pretrained(__UpperCamelCase , **__UpperCamelCase ) A__ = model_class.from_pretrained(__UpperCamelCase , retriever=__UpperCamelCase , **__UpperCamelCase ) model.retriever.init_retrieval() else: A__ = model_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase ) model.to(args.device ) with open(args.evaluation_set , 'r' ) as eval_file, open(args.predictions_path , 'w' ) as preds_file: A__ = [] for line in tqdm(__UpperCamelCase ): questions.append(line.strip() ) if len(__UpperCamelCase ) == args.eval_batch_size: A__ = evaluate_batch_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) preds_file.write('\n'.join(__UpperCamelCase ) + '\n' ) preds_file.flush() A__ = [] if len(__UpperCamelCase ) > 0: A__ = evaluate_batch_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) preds_file.write('\n'.join(__UpperCamelCase ) ) preds_file.flush() score_fn(__UpperCamelCase , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = get_args() main(args)
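# A hypothetical invocation of the evaluation script above; the checkpoint is a real
# RAG model id, the data paths are placeholders:
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-token-nq \
#       --model_type rag_token \
#       --evaluation_set path/to/eval.source \
#       --gold_data_path path/to/gold.tsv \
#       --gold_data_mode qa \
#       --eval_mode e2e \
#       --predictions_path predictions.txt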
import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer SCREAMING_SNAKE_CASE__ = '''bart''' SCREAMING_SNAKE_CASE__ = True @st.cache(allow_output_mutation=__UpperCamelCase ) def A ( ) -> List[str]: if LOAD_DENSE_INDEX: A__ = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' ) A__ = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' ) A__ = qar_model.eval() else: A__ , A__ = (None, None) if MODEL_TYPE == "bart": A__ = AutoTokenizer.from_pretrained('yjernite/bart_eli5' ) A__ = AutoModelForSeqaSeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' ) A__ = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' ) sas_model.load_state_dict(save_dict['model'] ) A__ = sas_model.eval() else: A__ , A__ = make_qa_sas_model( model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0' ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=__UpperCamelCase ) def A ( ) -> Any: if LOAD_DENSE_INDEX: A__ = faiss.StandardGpuResources() A__ = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0' )['train'] A__ = np.memmap( 'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wikiaab_passages.num_rows, 128) , ) A__ = faiss.IndexFlatIP(128 ) A__ = faiss.index_cpu_to_gpu(__UpperCamelCase , 1 , __UpperCamelCase ) wikiaab_gpu_index_flat.add(__UpperCamelCase ) # TODO fix for larger GPU else: A__ , A__ = (None, None) A__ = Elasticsearch([{'host': 'localhost', 'port': '9200'}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=__UpperCamelCase ) def A ( ) -> List[str]: A__ = datasets.load_dataset('eli5' , name='LFQA_reddit' ) A__ = elia['train_eli5'] A__ = np.memmap( 'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(elia_train.num_rows, 128) ) A__ = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(__UpperCamelCase ) return (elia_train, eli5_train_q_index) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = load_indexes() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = load_models() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = load_train_data() def A ( __UpperCamelCase , __UpperCamelCase=10 ) -> int: A__ = embed_questions_for_retrieval([question] , __UpperCamelCase , __UpperCamelCase ) A__ , A__ = eli5_train_q_index.search(__UpperCamelCase , __UpperCamelCase ) A__ = [elia_train[int(__UpperCamelCase )] for i in I[0]] return nn_examples def A ( __UpperCamelCase , __UpperCamelCase="wiki40b" , __UpperCamelCase="dense" , __UpperCamelCase=10 ) -> int: if source == "none": A__ , A__ = (' <P> '.join(['' for _ in range(11 )] ).strip(), []) else: if method == "dense": A__ , A__ = query_qa_dense_index( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) else: A__ , A__ = query_es_index( __UpperCamelCase , __UpperCamelCase , index_name='english_wiki40b_snippets_100w' , n_results=__UpperCamelCase , ) A__ = [ (res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst ] A__ = 'question: {} context: {}'.format(__UpperCamelCase , __UpperCamelCase ) 
return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda __UpperCamelCase : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __UpperCamelCase : None), } ) def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=64 , __UpperCamelCase=256 , __UpperCamelCase=False , __UpperCamelCase=2 , __UpperCamelCase=0.95 , __UpperCamelCase=0.8 ) -> Tuple: with torch.no_grad(): A__ = qa_sas_generate( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , num_answers=1 , num_beams=__UpperCamelCase , min_len=__UpperCamelCase , max_len=__UpperCamelCase , do_sample=__UpperCamelCase , temp=__UpperCamelCase , top_p=__UpperCamelCase , top_k=__UpperCamelCase , max_input_length=1_024 , device='cuda:0' , )[0] return (answer, support_list) st.title('''Long Form Question Answering with ELI5''') # Start sidebar SCREAMING_SNAKE_CASE__ = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>''' SCREAMING_SNAKE_CASE__ = ''' <html> <head> <style> .img-container { padding-left: 90px; padding-right: 90px; padding-top: 50px; padding-bottom: 50px; background-color: #f0f3f9; } </style> </head> <body> <span class="img-container"> <!-- Inline parent element --> %s </span> </body> </html> ''' % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia SCREAMING_SNAKE_CASE__ = ''' This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html). First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset, a pre-processed fixed snapshot of Wikipedia. ''' st.sidebar.markdown(description, unsafe_allow_html=True) SCREAMING_SNAKE_CASE__ = [ '''Answer the question''', '''View the retrieved document only''', '''View the most similar ELI5 question and answer''', '''Show me everything, please!''', ] SCREAMING_SNAKE_CASE__ = st.sidebar.checkbox('''Demo options''') if demo_options: SCREAMING_SNAKE_CASE__ = st.sidebar.selectbox( '''''', action_list, index=3, ) SCREAMING_SNAKE_CASE__ = action_list.index(action_st) SCREAMING_SNAKE_CASE__ = st.sidebar.selectbox( '''''', ['''Show full text of passages''', '''Show passage section titles'''], index=0, ) SCREAMING_SNAKE_CASE__ = show_type == '''Show full text of passages''' else: SCREAMING_SNAKE_CASE__ = 3 SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = st.sidebar.checkbox('''Retrieval options''') if retrieval_options: SCREAMING_SNAKE_CASE__ = ''' ### Information retriever options The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs. The answer is then generated by sequence to sequence model which takes the question and retrieved document as input. 
''' st.sidebar.markdown(retriever_info) SCREAMING_SNAKE_CASE__ = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none''']) SCREAMING_SNAKE_CASE__ = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed''']) else: SCREAMING_SNAKE_CASE__ = '''wiki40b''' SCREAMING_SNAKE_CASE__ = '''dense''' SCREAMING_SNAKE_CASE__ = '''beam''' SCREAMING_SNAKE_CASE__ = 2 SCREAMING_SNAKE_CASE__ = 6_4 SCREAMING_SNAKE_CASE__ = 2_5_6 SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = st.sidebar.checkbox('''Generation options''') if generate_options: SCREAMING_SNAKE_CASE__ = ''' ### Answer generation options The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large) weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with **beam** search, or **sample** from the decoder\'s output probabilities. ''' st.sidebar.markdown(generate_info) SCREAMING_SNAKE_CASE__ = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled''']) SCREAMING_SNAKE_CASE__ = st.sidebar.slider( '''Minimum generation length''', min_value=8, max_value=2_5_6, value=6_4, step=8, format=None, key=None ) SCREAMING_SNAKE_CASE__ = st.sidebar.slider( '''Maximum generation length''', min_value=6_4, max_value=5_1_2, value=2_5_6, step=1_6, format=None, key=None ) if sampled == "beam": SCREAMING_SNAKE_CASE__ = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: SCREAMING_SNAKE_CASE__ = st.sidebar.slider( '''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None ) SCREAMING_SNAKE_CASE__ = st.sidebar.slider( '''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None ) SCREAMING_SNAKE_CASE__ = None # start main text SCREAMING_SNAKE_CASE__ = [ '''<MY QUESTION>''', '''How do people make chocolate?''', '''Why do we get a fever when we are sick?''', '''How can different animals perceive different colors?''', '''What is natural language processing?''', '''What\'s the best way to treat a sunburn?''', '''What exactly are vitamins ?''', '''How does nuclear energy provide electricity?''', '''What\'s the difference between viruses and bacteria?''', '''Why are flutes classified as woodwinds when most of them are made out of metal ?''', '''Why do people like drinking coffee even though it tastes so bad?''', '''What happens when wine ages? How does it make the wine taste better?''', '''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''', '''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''', '''How does New Zealand have so many large bird predators?''', ] SCREAMING_SNAKE_CASE__ = st.selectbox( '''What would you like to ask? 
---- select <MY QUESTION> to enter a new query''', questions_list, index=1, ) if question_s == "<MY QUESTION>": SCREAMING_SNAKE_CASE__ = st.text_input('''Enter your question here:''', '''''') else: SCREAMING_SNAKE_CASE__ = question_s if st.button('''Show me!'''): if action in [0, 1, 3]: if index_type == "mixed": SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = make_support(question, source=wiki_source, method='''dense''', n_results=1_0) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = make_support(question, source=wiki_source, method='''sparse''', n_results=1_0) SCREAMING_SNAKE_CASE__ = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] SCREAMING_SNAKE_CASE__ = support_list[:1_0] SCREAMING_SNAKE_CASE__ = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list]) else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = make_support(question, source=wiki_source, method=index_type, n_results=1_0) if action in [0, 3]: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == '''sampled'''), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown('''### The model generated answer is:''') st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''') for i, res in enumerate(support_list): SCREAMING_SNAKE_CASE__ = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_''')) SCREAMING_SNAKE_CASE__ = res[1].strip() if sec_titles == "": SCREAMING_SNAKE_CASE__ = '''[{}]({})'''.format(res[0], wiki_url) else: SCREAMING_SNAKE_CASE__ = sec_titles.split(''' & ''') SCREAMING_SNAKE_CASE__ = ''' & '''.join( ['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list] ) st.markdown( '''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( '''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True ) if action in [2, 3]: SCREAMING_SNAKE_CASE__ = find_nearest_training(question) SCREAMING_SNAKE_CASE__ = nn_train_list[0] st.markdown( '''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title''']) ) SCREAMING_SNAKE_CASE__ = [ '''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != ''''''])) for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score'''])) if i == 0 or sc > 2 ] st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st))) SCREAMING_SNAKE_CASE__ = ''' --- **Disclaimer** *The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system. Evaluating biases of such a model and ensuring factual generations are still very much open research problems. Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.* ''' st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
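# The "mixed" retrieval branch above interleaves dense and sparse hits with
# order-preserving de-duplication, capped at ten results; a tiny standalone check
# of that merge logic with placeholder passages:
dense = [("Chocolate", "History", 0.9, "passage a"), ("Cocoa", "Uses", 0.8, "passage b")]
sparse = [("Cocoa", "Uses", 0.8, "passage b"), ("Candy", "Intro", 0.7, "passage c")]
support_list = []
for res_d, res_s in zip(dense, sparse):
    if tuple(res_d) not in support_list:
        support_list.append(tuple(res_d))
    if tuple(res_s) not in support_list:
        support_list.append(tuple(res_s))
support_list = support_list[:10]
print([res[0] for res in support_list])  # ['Chocolate', 'Cocoa', 'Candy']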
import inspect import unittest from transformers import ViTHybridConfig from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class __lowerCAmelCase : """simple docstring""" def __init__( self : List[Any] , _snake_case : Any , _snake_case : Optional[int]=13 , _snake_case : Optional[Any]=64 , _snake_case : List[str]=2 , _snake_case : Any=3 , _snake_case : Union[str, Any]=True , _snake_case : Dict=True , _snake_case : int=32 , _snake_case : int=5 , _snake_case : Union[str, Any]=4 , _snake_case : int=37 , _snake_case : Tuple="gelu" , _snake_case : Optional[int]=0.1 , _snake_case : Dict=0.1 , _snake_case : List[str]=10 , _snake_case : Union[str, Any]=0.02 , _snake_case : Dict=[1, 16, 4, 4] , _snake_case : Dict=None , ): """simple docstring""" A__ = parent A__ = batch_size A__ = image_size A__ = patch_size A__ = num_channels A__ = is_training A__ = use_labels A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = type_sequence_label_size A__ = initializer_range A__ = scope A__ = backbone_featmap_shape # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) # the number of patches is based on the feature map of the backbone, which by default uses an output stride # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size A__ = (self.image_size // 32) ** 2 A__ = num_patches + 1 def _a ( self : Any ): """simple docstring""" A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A__ = self.get_config() return config, pixel_values, labels def _a ( self : Tuple ): """simple docstring""" A__ = { 'global_padding': 'same', 'layer_type': 'bottleneck', 'depths': [3, 4, 9], 'out_features': ['stage1', 'stage2', 'stage3'], 'embedding_dynamic_padding': True, 'hidden_sizes': [4, 8, 16, 32], 'num_groups': 2, } return ViTHybridConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_snake_case , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=_snake_case , ) def _a ( self : int , _snake_case : Optional[int] , _snake_case : Union[str, Any] , _snake_case : Optional[int] ): """simple docstring""" A__ = ViTHybridModel(config=_snake_case ) model.to(_snake_case ) model.eval() A__ = model(_snake_case ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : List[str] , _snake_case : str , _snake_case : Union[str, Any] , _snake_case : Any ): """simple docstring""" A__ = self.type_sequence_label_size A__ = ViTHybridForImageClassification(_snake_case ) model.to(_snake_case ) model.eval() A__ = model(_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _a ( self : Dict ): """simple docstring""" A__ = self.prepare_config_and_inputs() A__ , A__ , A__ = config_and_inputs A__ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" A__ : Union[str, Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () A__ : str = ( {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification} if is_torch_available() else {} ) A__ : Union[str, Any] = False A__ : Any = False A__ : Union[str, Any] = False def _a ( self : Dict ): """simple docstring""" A__ = ViTHybridModelTester(self ) A__ = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=37 ) def _a ( self : int ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='ViT does not use inputs_embeds' ) def _a ( self : int ): """simple docstring""" pass def _a ( self : int ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(_snake_case ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_snake_case , nn.Linear ) ) def _a ( self : List[str] ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(_snake_case ) A__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ['pixel_values'] self.assertListEqual(arg_names[:1] , _snake_case ) def _a ( self : Any ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def _a ( self : str ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_snake_case ) def _a ( self : Any ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = _config_zero_init(_snake_case ) for model_class in self.all_model_classes: A__ = model_class(config=_snake_case ) # Skip the check for the backbone for name, module in model.named_modules(): if module.__class__.__name__ == "ViTHybridPatchEmbeddings": A__ = [F'''{name}.{key}''' for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @slow def _a ( self : int ): """simple docstring""" for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = ViTHybridModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) def A ( ) 
-> Union[str, Any]: A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def _a ( self : Tuple ): """simple docstring""" return ( ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _a ( self : Optional[Any] ): """simple docstring""" A__ = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( _snake_case ) A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=_snake_case , return_tensors='pt' ).to(_snake_case ) # forward pass with torch.no_grad(): A__ = model(**_snake_case ) # verify the logits A__ = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , _snake_case ) A__ = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(_snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _snake_case , atol=1E-4 ) ) @slow @require_accelerate def _a ( self : List[Any] ): """simple docstring""" A__ = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' ) A__ = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' ) A__ = prepare_img() A__ = image_processor(images=_snake_case , return_tensors='pt' ) A__ = model(**_snake_case ) A__ = outputs.logits # model predicts one of the 1000 ImageNet classes A__ = logits.argmax(-1 ).item() self.assertTrue(model.config.idalabel[predicted_class_idx] , 'tabby, tabby cat' )
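# The sequence-length bookkeeping in the tester above, spelled out: with a backbone
# output stride of 32, the token count is the feature-map area plus one [CLS] token.
image_size = 64                        # tester default above
num_patches = (image_size // 32) ** 2  # spatial resolution shrinks by 32x
seq_length = num_patches + 1           # +1 for the [CLS] token
print(num_patches, seq_length)         # 4 5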
import contextlib import os import sqlitea import pytest from datasets import Dataset, Features, Value from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy def A ( __UpperCamelCase , __UpperCamelCase ) -> str: assert isinstance(__UpperCamelCase , __UpperCamelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @require_sqlalchemy @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> str: A__ = tmp_path / 'cache' A__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): A__ = SqlDatasetReader( 'dataset' , 'sqlite:///' + sqlite_path , cache_dir=__UpperCamelCase , keep_in_memory=__UpperCamelCase ).read() _check_sql_dataset(__UpperCamelCase , __UpperCamelCase ) @require_sqlalchemy @pytest.mark.parametrize( 'features' , [ None, {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}, {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'}, {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'}, {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'}, ] , ) def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]: A__ = tmp_path / 'cache' A__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} A__ = features.copy() if features else default_expected_features A__ = ( Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) A__ = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , features=__UpperCamelCase , cache_dir=__UpperCamelCase ).read() _check_sql_dataset(__UpperCamelCase , __UpperCamelCase ) def A ( __UpperCamelCase ) -> Union[str, Any]: with contextlib.closing(sqlitea.connect(__UpperCamelCase ) ) as con: A__ = con.cursor() cur.execute('SELECT * FROM dataset' ) for row in cur: yield row @require_sqlalchemy def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Any: A__ = tmp_path / 'cache' A__ = os.path.join(__UpperCamelCase , 'tmp.sql' ) A__ = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=__UpperCamelCase ).read() SqlDatasetWriter(__UpperCamelCase , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=1 ).write() A__ = iter_sql_file(__UpperCamelCase ) A__ = iter_sql_file(__UpperCamelCase ) for rowa, rowa in zip(__UpperCamelCase , __UpperCamelCase ): assert rowa == rowa @require_sqlalchemy def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]: A__ = tmp_path / 'cache' A__ = os.path.join(__UpperCamelCase , 'tmp.sql' ) A__ = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=__UpperCamelCase ).read() SqlDatasetWriter(__UpperCamelCase , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=2 ).write() A__ = iter_sql_file(__UpperCamelCase ) A__ = iter_sql_file(__UpperCamelCase ) for rowa, rowa in zip(__UpperCamelCase , __UpperCamelCase ): assert rowa == rowa @require_sqlalchemy def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> str: A__ = tmp_path / 'cache' A__ = os.path.join(__UpperCamelCase , 'tmp.sql' ) A__ = SqlDatasetReader('dataset' , 'sqlite:///' + 
sqlite_path , cache_dir=__UpperCamelCase ).read() with pytest.raises(__UpperCamelCase ): SqlDatasetWriter(__UpperCamelCase , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=0 ).write()
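# A hedged sketch of the round trip these tests exercise, using the public
# datasets API (assumes `datasets` and `sqlalchemy` are installed; the table and
# database names are placeholders):
from datasets import Dataset

ds = Dataset.from_dict({"col_1": ["0", "1"], "col_2": [0, 1], "col_3": [0.0, 1.0]})
ds.to_sql("dataset", "sqlite:///tmp.sql")
round_tripped = Dataset.from_sql("dataset", "sqlite:///tmp.sql")
print(round_tripped.column_names)  # ['col_1', 'col_2', 'col_3']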
def is_even(number: int) -> bool:
    """Return True if `number` is even: an even number's lowest bit is 0."""
    return number & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
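# A few spot checks; negative integers work too, because Python's & operates on
# two's-complement integers of arbitrary width.
for n in (0, 1, 4, 7, -2):
    print(n, is_even(n))  # True, False, True, False, True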
from manim import * class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def _a ( self : List[Any] ): """simple docstring""" A__ = Rectangle(height=0.5 , width=0.5 ) A__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) A__ = [mem.copy() for i in range(6 )] A__ = [mem.copy() for i in range(6 )] A__ = VGroup(*_snake_case ).arrange(_snake_case , buff=0 ) A__ = VGroup(*_snake_case ).arrange(_snake_case , buff=0 ) A__ = VGroup(_snake_case , _snake_case ).arrange(_snake_case , buff=0 ) A__ = Text('CPU' , font_size=24 ) A__ = Group(_snake_case , _snake_case ).arrange(_snake_case , buff=0.5 , aligned_edge=_snake_case ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_snake_case ) A__ = [mem.copy() for i in range(1 )] A__ = VGroup(*_snake_case ).arrange(_snake_case , buff=0 ) A__ = Text('GPU' , font_size=24 ) A__ = Group(_snake_case , _snake_case ).arrange(_snake_case , buff=0.5 , aligned_edge=_snake_case ) gpu.align_to(_snake_case , _snake_case ) gpu.set_x(gpu.get_x() - 1 ) self.add(_snake_case ) A__ = [mem.copy() for i in range(6 )] A__ = VGroup(*_snake_case ).arrange(_snake_case , buff=0 ) A__ = Text('Model' , font_size=24 ) A__ = Group(_snake_case , _snake_case ).arrange(_snake_case , buff=0.5 , aligned_edge=_snake_case ) model.move_to([3, -1.0, 0] ) self.play( Create(_snake_case , run_time=1 ) , Create(_snake_case , run_time=1 ) , Create(_snake_case , run_time=1 ) , ) A__ = MarkupText( F'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , ) A__ = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) A__ = MarkupText( F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) step_a.move_to([2, 2, 0] ) self.play(Write(_snake_case , run_time=2.5 ) , Write(_snake_case ) , Write(_snake_case ) ) self.add(_snake_case ) A__ = [] A__ = [] A__ = [] for i, rect in enumerate(_snake_case ): A__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_snake_case , opacity=0.7 ) cpu_target.move_to(_snake_case ) cpu_target.generate_target() A__ = 0.46 / 4 A__ = 0.46 / 3 if i == 0: cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_snake_case ) cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 ) elif i == 3: cpu_target.target.next_to(cpu_targs[0].target , direction=_snake_case , buff=0.0 ) else: cpu_target.target.next_to(cpu_targs[i - 1].target , direction=_snake_case , buff=0.0 ) cpu_targs.append(_snake_case ) first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_snake_case ) ) second_animations.append(MoveToTarget(_snake_case , run_time=1.5 ) ) self.play(*_snake_case ) self.play(*_snake_case ) self.wait()
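# Scenes like the one above are rendered from the command line; a hypothetical
# invocation (file name is a placeholder; -pql previews at low quality):
#
#   manim -pql model_loading_scene.py <SceneClassName>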
from typing import Dict from .base import GenericTensor, Pipeline class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def _a ( self : Any , _snake_case : str=None , _snake_case : Dict=None , _snake_case : Any=None , **_snake_case : str ): """simple docstring""" if tokenize_kwargs is None: A__ = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( 'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' ) A__ = truncation A__ = tokenize_kwargs A__ = {} if return_tensors is not None: A__ = return_tensors return preprocess_params, {}, postprocess_params def _a ( self : Any , _snake_case : Dict , **_snake_case : Optional[Any] ): """simple docstring""" A__ = self.framework A__ = self.tokenizer(_snake_case , return_tensors=_snake_case , **_snake_case ) return model_inputs def _a ( self : List[Any] , _snake_case : Dict ): """simple docstring""" A__ = self.model(**_snake_case ) return model_outputs def _a ( self : Optional[Any] , _snake_case : List[Any] , _snake_case : str=False ): """simple docstring""" if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self : Dict , *_snake_case : int , **_snake_case : List[str] ): """simple docstring""" return super().__call__(*_snake_case , **_snake_case )
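# A hedged usage sketch for a feature-extraction pipeline like the one above; the
# model name is an assumption, and `tokenize_kwargs` flows into the tokenizer call
# exactly as the preprocess step shows.
from transformers import pipeline

extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
features = extractor("This is a test", tokenize_kwargs={"truncation": True})
print(len(features[0]), len(features[0][0]))  # token count, hidden size (768)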
52
0
from PIL import Image


def mean_threshold(image: Image) -> Image:
    """Binarize a grayscale image around its global mean pixel value."""
    width, height = image.size
    mean = 0
    pixels = image.load()
    # First pass: accumulate the global mean gray value.
    for i in range(height):
        for j in range(width):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height
    # Second pass: set pixels above the mean to white, the rest to black.
    for j in range(height):
        for i in range(width):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image


if __name__ == "__main__":
    SCREAMING_SNAKE_CASE__ = mean_threshold(Image.open('''path_to_image''').convert('''L'''))
    SCREAMING_SNAKE_CASE__.save('''output_image_path''')
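For comparison, the same global-mean binarization can be written with Pillow's built-in ImageStat and point() helpers; a minimal sketch, assuming the same placeholder paths as above:

from PIL import Image, ImageStat

img = Image.open('path_to_image').convert('L')
# ImageStat computes per-band statistics; mean[0] is the average gray value.
mean = ImageStat.Stat(img).mean[0]
# point() applies the threshold to every pixel in a single pass.
binary = img.point(lambda p: 255 if p > mean else 0)
binary.save('output_image_path')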
702
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)


def A ( __UpperCamelCase , __UpperCamelCase ) -> List[Any]:
    return (preds == labels).mean()


@dataclass
class __lowerCAmelCase :
    """simple docstring"""

    A__ : str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    A__ : Optional[str] = field(
        default=UpperCAmelCase_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    A__ : Optional[str] = field(
        default=UpperCAmelCase_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    A__ : Optional[str] = field(
        default=UpperCAmelCase_ ,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ,
    )


@dataclass
class __lowerCAmelCase :
    """simple docstring"""

    A__ : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
    A__ : str = field(metadata={"help": "Should contain the data files for the task."} )
    A__ : int = field(
        default=1_28 ,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } ,
    )
    A__ : bool = field(
        default=UpperCAmelCase_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )


def A ( ) -> Any:
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    A__ , A__ , A__ = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
            ' --overwrite_output_dir to overcome.' )

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' ,
        datefmt='%m/%d/%Y %H:%M:%S' ,
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,
    )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' ,
        training_args.local_rank ,
        training_args.device ,
        training_args.n_gpu ,
        bool(training_args.local_rank != -1 ) ,
        training_args.fpaa ,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s' , __UpperCamelCase )

    # Set seed
    set_seed(training_args.seed )

    try:
        A__ = processors[data_args.task_name]()
        A__ = processor.get_labels()
        A__ = len(__UpperCamelCase )
    except KeyError:
        raise ValueError('Task not found: %s' % (data_args.task_name) )

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    A__ = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path ,
        num_labels=__UpperCamelCase ,
        finetuning_task=data_args.task_name ,
        cache_dir=model_args.cache_dir ,
    )
    A__ = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,
        cache_dir=model_args.cache_dir ,
    )
    A__ = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path ,
        from_tf=bool('.ckpt' in model_args.model_name_or_path ) ,
        config=__UpperCamelCase ,
        cache_dir=model_args.cache_dir ,
    )

    # Get datasets
    A__ = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir ,
            tokenizer=__UpperCamelCase ,
            task=data_args.task_name ,
            max_seq_length=data_args.max_seq_length ,
            overwrite_cache=data_args.overwrite_cache ,
            mode=Split.train ,
        )
        if training_args.do_train
        else None
    )
    A__ = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir ,
            tokenizer=__UpperCamelCase ,
            task=data_args.task_name ,
            max_seq_length=data_args.max_seq_length ,
            overwrite_cache=data_args.overwrite_cache ,
            mode=Split.dev ,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(__UpperCamelCase ) -> Dict:
        A__ = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(__UpperCamelCase , p.label_ids )}

    # Data collator
    A__ = DataCollatorWithPadding(__UpperCamelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None

    # Initialize our Trainer
    A__ = Trainer(
        model=__UpperCamelCase ,
        args=__UpperCamelCase ,
        train_dataset=__UpperCamelCase ,
        eval_dataset=__UpperCamelCase ,
        compute_metrics=__UpperCamelCase ,
        data_collator=__UpperCamelCase ,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir )

    # Evaluation
    A__ = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )

        A__ = trainer.evaluate()

        A__ = os.path.join(training_args.output_dir , 'eval_results.txt' )
        if trainer.is_world_master():
            with open(__UpperCamelCase , 'w' ) as writer:
                logger.info('***** Eval results *****' )
                for key, value in result.items():
                    logger.info(' %s = %s' , __UpperCamelCase , __UpperCamelCase )
                    writer.write('%s = %s\n' % (key, value) )

        results.update(__UpperCamelCase )

    return results


def A ( __UpperCamelCase ) -> List[Any]:
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
52
0